--- /dev/null
+# ignore all build folders
+/build*/
+# ignore backup files
+*~
+# ignore Python files
+*.pyc
+# ignore files generated during python setup.py sdist
+MANIFEST
+_skbuild/
+dist
--- /dev/null
+---
+include:
+ - project: 'core/ci-config'
+ ref: master
+ file: 'config/common/master.yml'
+ - project: 'core/ci-config'
+ ref: master
+ file: 'jobs/common/master.yml'
+
+before_script:
+ - . /duneci/bin/duneci-init-job
+
+variables:
+ DUNECI_TEST_LABELS: quick
+ DUNE_TEST_EXPECTED_VC_IMPLEMENTATION: SSE2
+
+debian:10 gcc-7-17--expensive:
+ # This image has Vc
+ image: registry.dune-project.org/docker/ci/debian:10
+ script: duneci-standard-test
+ stage: test
+ # allow expensive tests
+ variables:
+ DUNECI_CXXFLAGS: -mavx
+ DUNECI_TEST_LABELS: ""
+ DUNECI_TOOLCHAIN: gcc-7-17
+ DUNE_TEST_EXPECTED_VC_IMPLEMENTATION: AVX
+ # require AVX to properly test Vc
+ tags: [duneci, "iset:avx"]
+  # allowed to fail so that e.g. a merge is not held up when a runner
+  # supporting avx is unavailable
+ allow_failure: true
+
+
+debian-11-gcc-9-17-python:
+ image: registry.dune-project.org/docker/ci/debian:11
+ script: duneci-standard-test
+ stage: test
+ variables:
+ DUNECI_TOOLCHAIN: gcc-9-17
+    # We need some variables to build the dune-py module during execution of the first Python test:
+    # we need to find the dune modules
+ DUNE_CONTROL_PATH: /duneci/modules:$CI_PROJECT_DIR
+ # the position for the dune-py module
+ DUNE_PY_DIR: /duneci/modules/dune-py
+    # this variable is used during the dune-py build - there is currently no
+    # known way to access the CMAKE_FLAGS used to build the modules...
+ DUNE_CMAKE_FLAGS: "CC=gcc-9 CXX=g++-9 -DCMAKE_CXX_FLAGS='-std=c++17 -O2 -g -Wall -fdiagnostics-color=always' -DDUNE_ENABLE_PYTHONBINDINGS=ON -DDUNE_MAX_TEST_CORES=4 -DBUILD_SHARED_LIBS=TRUE -DDUNE_PYTHON_INSTALL_LOCATION=none -DCMAKE_POSITION_INDEPENDENT_CODE=TRUE -DCMAKE_DISABLE_FIND_PACKAGE_LATEX=TRUE -DCMAKE_DISABLE_FIND_PACKAGE_Alberta=TRUE -DCMAKE_DISABLE_FIND_PACKAGE_Vc=TRUE -DCMAKE_DISABLE_DOCUMENTATION=TRUE"
+    # cmake flags we use for all dune modules - it is important that BUILD_SHARED_LIBS is set (need some better way of doing this)
+ DUNECI_CMAKE_FLAGS: $DUNE_CMAKE_FLAGS
+ # finally set the python path to all modules
+ PYTHONPATH: $CI_PROJECT_DIR/build-cmake/python
+ tags: [duneci]
+
+system-test:
+ stage: downstream
+ variables:
+ CI_BUILD_REF_NAME: $CI_COMMIT_REF_NAME
+ DUNECI_TEST_LABELS: ""
+ trigger:
+ project: infrastructure/dune-nightly-test
+ branch: core
+ strategy: depend
+ allow_failure: true
--- /dev/null
+Benjamin Bykowski <benjamin.bykowski@rwth-aachen.de> Convex Function <329364@wright.mathepool.rwth-aachen.de>
+
--- /dev/null
+# Release 2.8
+
+- Set the minimal required CMake version to >= 3.13.
+
+- Python bindings have been moved from `dune-python` to the respective
+ core modules. `dune-python` is now obsolete. To activate Python bindings the
+ CMake flag `DUNE_ENABLE_PYTHONBINDINGS` needs to be turned on (default is off).
+ Furthermore, flags for either shared library or position independent code
+ need to be used.
+
+- Add `instance` method to MPIHelper that does not expect arguments for access
+ to the singleton object after initialization.
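+
+  A minimal usage sketch (not taken verbatim from the Dune sources):
+  ```c++
+  #include <iostream>
+  #include <dune/common/parallel/mpihelper.hh>
+
+  int main(int argc, char** argv)
+  {
+    // the first call still passes argc/argv and initializes MPI (if available)
+    Dune::MPIHelper::instance(argc, argv);
+
+    // later accesses to the singleton may now omit the arguments
+    Dune::MPIHelper& helper = Dune::MPIHelper::instance();
+    if (helper.rank() == 0)
+      std::cout << "running on " << helper.size() << " processes\n";
+    return 0;
+  }
+  ```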
+
+- Remove the cmake check for `HAVE_MPROTECT` and also do not define this variable in the
+ `config.h` file. It is defined only inside the header `debugallocator.hh`.
+
+- Remove deprecated type-traits `has_nan`, `is_indexable`, and
+ `is_range`, use the CamelCase versions instead.
+
+- Deprecate fallback implementations `Dune::Std::apply`, `Dune::Std::bool_constant`, and
+  `Dune::Std::make_array` in favor of the standard C++ implementations.
+
+- Deprecate type traits `Dune::Std::to_false_type`, `Dune::Std::to_true_type`.
+ `Dune::AlwaysFalse` and `Dune::AlwaysTrue` (from header `dune/common/typetraits.hh`)
+  now inherit from `std::false_type` and `std::true_type` and are therefore
+ exact replacements for these two type traits.
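+
+  A typical use of `Dune::AlwaysFalse` is a `static_assert` that only fires
+  when the surrounding template is actually instantiated (an illustrative
+  sketch, not taken from the Dune sources):
+  ```c++
+  #include <dune/common/typetraits.hh>
+
+  template<class T>
+  void notImplemented()
+  {
+    // a plain static_assert(false, ...) would always fail to compile;
+    // AlwaysFalse<T> delays the error until this template is instantiated
+    static_assert(Dune::AlwaysFalse<T>::value, "not implemented for this type");
+  }
+  ```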
+
+- Deprecate fallback implementation `Dune::Std::conjunction`, `Dune::Std::disjunction`,
+  and `Dune::Std::negation`. Use the C++17 standard library implementations instead.
+
+- Deprecate fallback implementations `Dune::Std::is_callable` and `Dune::Std::is_invocable`.
+ Use C++17 std implementation `std::is_invocable` instead. Be aware that
+ `Dune::Std::is_callable` and `std::is_invocable` are slightly different concepts,
+ since `std::is_invocable` also covers invocation of pointers to member functions
+ and pointers to data members. To additionally constrain for that case,
+  there is now `Dune::IsCallable` (in `dune/common/typetraits.hh`).
+
+- Added `Dune::IsCallable` (in `dune/common/typetraits.hh`) which is
+ an improved version of the deprecated `Dune::Std::is_callable` and allows
+ for checking if a type is a function object type,
+  i.e. has a ()-operator that can be invoked with the given argument types and
+ returns a specified return type.
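+
+  An illustrative sketch (assuming the `IsCallable<F(Args...), R>` syntax
+  described above; not taken verbatim from the Dune sources):
+  ```c++
+  #include <dune/common/typetraits.hh>
+
+  auto square = [](int i) { return i * i; };
+
+  // true: square can be called with an int and the result converts to int
+  static_assert(Dune::IsCallable<decltype(square)(int), int>::value, "callable");
+
+  // false: a plain int is not a function object
+  static_assert(not Dune::IsCallable<int(int), int>::value, "not callable");
+  ```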
+
+- Remove C++ feature tests in CMake for features that are part of the C++17 standard. Add default
+ defines for `DUNE_HAVE_CXX_BOOL_CONSTANT`, `DUNE_HAVE_CXX_EXPERIMENTAL_BOOL_CONSTANT`,
+ `DUNE_HAVE_HEADER_EXPERIMENTAL_TYPE_TRAITS`, `DUNE_HAVE_CXX_APPLY`,
+ `DUNE_HAVE_CXX_EXPERIMENTAL_APPLY`, `HAVE_IS_INDEXABLE_SUPPORT` in `config.h` for one
+ more release.
+
+- Add backport of `FindPkgConfig.cmake` from cmake 3.19.4 since there was a bug in
+ an older find module leading to problems finding tbb in debian:10.
+
+- Update the FindTBB cmake module to search for the `TBBConfig.cmake` or the `tbb.pc`
+ file containing the configuration. Add the `AddTBBFlags.cmake` file containing
+ the macro `add_dune_tbb_flags` that must be called to use TBB.
+
+- Set minimal required MPI version to >= 3.0.
+
+- Previous versions of dune-common imported `std::shared_ptr` and `std::make_shared`
+ into the `Dune` namespace. dune-common-2.8 stops doing that.
+
+- The file `function.hh` is deprecated. It contained the two base classes
+ `Function` and `VirtualFunction`. In downstream codes, these should be
+ replaced by C++ function objects, `std::function` etc.
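+
+  For example, instead of deriving from `VirtualFunction` one can typically
+  store the mapping in a `std::function` (an illustrative sketch):
+  ```c++
+  #include <functional>
+
+  // replaces a class implementing VirtualFunction<double, double>
+  std::function<double(double)> f = [](double x) { return x * x; };
+  double y = f(3.0);   // y == 9.0
+  ```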
+
+- Support for distributing DUNE modules as python packages has been added.
+ Package meta data is parsed in `packagemetadata.py` from the dune.module file.
+ A script `/bin/dunepackaging.py` was added to generate package files
+ (`setup.py`, `pyproject.toml`) that can also be used to upload packages to
+ the Python Package Index. For a brief description of what is required to add
+ this support to existing dune modules see
+ https://gitlab.dune-project.org/core/dune-common/-/merge_requests/900
+ Note that this can also be used to generate a package for dune modules
+ that don't provide Python bindings.
+
+- Eigenvectors of symmetric 2x2 `FieldMatrix`es are now computed correctly
+ even when they have zero eigenvalues.
+
+- Eigenvalues and eigenvectors are now also supported for matrices and
+  vectors whose `field_type` is `float`.
+
+- `ParameterTreeParser::readINITree` can now directly construct and
+  return a parameter tree when using the new overload without a parameter
+  tree argument.
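+
+  A short sketch of both variants (the file name is just an example):
+  ```c++
+  #include <dune/common/parametertree.hh>
+  #include <dune/common/parametertreeparser.hh>
+
+  // classic interface: fill an existing tree
+  Dune::ParameterTree params;
+  Dune::ParameterTreeParser::readINITree("params.ini", params);
+
+  // new overload: construct and return the tree directly
+  auto params2 = Dune::ParameterTreeParser::readINITree("params.ini");
+  ```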
+
+- MPIHelper::instance can now be called without parameters if it was
+ already initialized.
+
+- MPITraits now support complex.
+
+- There is now a matrix wrapper `transpose(M)` that represents the
+  transpose of a matrix.
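+
+  A minimal sketch (the header name `dune/common/transpose.hh` is assumed
+  here):
+  ```c++
+  #include <dune/common/fmatrix.hh>
+  #include <dune/common/transpose.hh>
+
+  Dune::FieldMatrix<double,2,3> A = {{1, 2, 3}, {4, 5, 6}};
+
+  // transpose(A) does not copy A; it returns a lightweight wrapper
+  // representing A^T, e.g. for use in matrix expressions
+  auto At = Dune::transpose(A);
+  ```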
+
+## build-system
+
+- The name mangling for Fortran libraries like BLAS and LAPACK is now done
+  without a Fortran compiler. So a Fortran compiler is no longer a build
+ requirement.
+
+- `dune_list_filter` is deprecated and will be removed after Dune 2.8. Use
+ `list(FILTER ...)` introduced by CMake 3.6 instead.
+
+- `ToUniquePtr` is deprecated and will be removed after Dune 2.8. Use
+ `std::unique_ptr` or `std::shared_ptr` instead.
+
+- Remove the CMake options `DUNE_BUILD_BOTH_LIBS` and
+ `DUNE_USE_ONLY_STATIC_LIBS`. Use the default CMake way instead by
+ setting `BUILD_SHARED_LIBS` accordingly. Building both static
+ and shared libraries is no longer supported.
+
+- Remove the deprecated CMake function `inkscape_generate_png_from_svg`.
+
+- Remove the old and deprecated use of UseLATEX.cmake.
+  `dune_add_latex_document` now redirects to `add_latex_document`,
+  which internally uses `latexmk`.
+
+- Many of the CMake find modules have been rewritten to use CMake's
+  imported targets. These targets are also used in the DUNE CMake
+  package configuration files, where they might appear in e.g.
+  `dune-module_LIBRARIES`. If you do not use the DUNE CMake build system,
+  the linker might complain about e.g. `METIS::METIS` not being
+  found. In that case you either need to use the CMake modules shipped with
+  DUNE or create these targets manually.
+
+## Deprecations and removals
+
+- Remove deprecated header `dune/common/std/memory.hh`; use `<memory>`
+ instead.
+
+- Deprecate header `dune/common/std/utility.hh`; use `<utility>` instead.
+
+- Deprecate header `dune/common/std/variant.hh`; use `<variant>` instead.
+
+- Remove incomplete CPack support that was never used to make an official
+ build or tarball.
+
+- Both macros `DUNE_DEPRECATED` and `DUNE_DEPRECATED_MSG(text)` are
+  deprecated and will be removed after Dune 2.8. Use the C++14 attribute
+  `[[deprecated]]` instead, but be aware that it is no drop-in replacement,
+  as it must sometimes be placed at a different position in the code.
+
+- The macro `DUNE_UNUSED` is deprecated and will be removed after
+  Dune 2.8. Use C++17's attribute `[[maybe_unused]]` instead, but be
+  aware that it is no drop-in replacement, as it must sometimes
+  be placed at a different position in the code.
+ The use of `DUNE_UNUSED_PARAMETER` is discouraged.
+
+- `Dune::void_t` has been deprecated and will be removed. Please use
+  `std::void_t` instead.
+
+- `Dune::Lcm` and `Dune::Gcd` are deprecated and will be removed. Please
+  use `std::lcm` and `std::gcd` instead.
+
+- `VariableSizeCommunicator::fixedsize` has been renamed to `fixedSize` in
+  line with the communicator changes of dune-grid. The old method will
+  be removed in 2.9.
+
+# Release 2.7
+
+- Added a fallback implementation of the C++20 feature `std::identity`.
+
+- A helper class `TransformedRangeView` was added representing a
+  transformed version of a given range using a unary transformation
+ function. The transformation is done on the fly leaving the wrapped
+ range unchanged.
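+
+  For illustration (assuming the helper function `transformedRangeView` from
+  `dune/common/rangeutilities.hh`):
+  ```c++
+  #include <iostream>
+  #include <vector>
+  #include <dune/common/rangeutilities.hh>
+
+  std::vector<int> v = {1, 2, 3};
+
+  // lazily presents each entry doubled; v itself is left unchanged
+  auto doubled = Dune::transformedRangeView(v, [](int x) { return 2 * x; });
+  for (int x : doubled)
+    std::cout << x << " ";   // prints: 2 4 6
+  ```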
+
+- `dune-common` now provides an implementation of `std::variant` for all compilers
+ that support C++14. It is contained in the file `dune/common/std/variant.hh`,
+ in the namespace `Dune::Std::`. If your compiler does support C++17 the
+ implementation in `dune-common` is automatically disabled, and the official
+ implementation from the standard library is used instead.
+
+- By popular demand, dense vectors and matrices like `FieldVector` and `FieldMatrix`
+ now have additional operators. In particular, there are
+ - Vector = - Vector
+ - Matrix = - Matrix
+ While these two work for any vector or matrix class that inherits from `DenseVector`
+ or `DenseMatrix`, the following additional methods only work for `FieldVector`:
+ - Vector = Scalar * Vector
+ - Vector = Vector * Scalar
+ - Vector = Vector / Scalar
+ Correspondingly, the `FieldMatrix` class now has
+ - Matrix = Matrix + Matrix
+ - Matrix = Matrix - Matrix
+ - Matrix = Scalar * Matrix
+ - Matrix = Matrix * Scalar
+ - Matrix = Matrix / Scalar
+ - Matrix = Matrix * Matrix
+ Note that the operators
+ - Vector = Vector + Vector
+ - Vector = Vector - Vector
+ have been introduced earlier.
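+
+  A short sketch of some of these operators (illustrative only):
+  ```c++
+  #include <dune/common/fvector.hh>
+  #include <dune/common/fmatrix.hh>
+
+  Dune::FieldVector<double,2> v = {1.0, 2.0};
+  auto w  = 2.0 * v;    // Vector = Scalar * Vector
+  auto u  = v / 2.0;    // Vector = Vector / Scalar
+  auto nv = -v;         // Vector = - Vector
+
+  Dune::FieldMatrix<double,2,2> A = {{1, 2}, {3, 4}};
+  Dune::FieldMatrix<double,2,2> B = {{0, 1}, {1, 0}};
+  auto C = A * B;       // Matrix = Matrix * Matrix
+  auto D = A + B;       // Matrix = Matrix + Matrix
+  auto E = 0.5 * A;     // Matrix = Scalar * Matrix
+  ```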
+
+- The matrix size functions `N()` and `M()` of `FieldMatrix` and `DiagonalMatrix` can now be used
+ in a `constexpr` context.
+
+- There is now (finally!) a method `power` in the file `math.hh` that computes
+ powers with an integer exponent, and is usable in compile-time expressions.
+ The use of the old power methods in `power.hh` is henceforth discouraged.
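+
+  For illustration (a sketch; see `math.hh` for the exact signature):
+  ```c++
+  #include <dune/common/math.hh>
+
+  // integer exponent, usable in constant expressions
+  static_assert(Dune::power(3, 4) == 81, "3^4 == 81");
+  constexpr double eighth = Dune::power(0.5, 3);   // 0.125
+  ```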
+
+- `FieldMatrix` and `FieldVector` are now [trivially copyable types]
+ if the underlying field type is trivially copyable.
+
+ As a consequence the copy assignment operator of the `DenseVector`
+ class can no longer be used; just avoid going through
+ `DenseVector` and use the real vector type instead
+ (e.g. `FieldVector`).
+
+ [trivially copyable types]: https://en.cppreference.com/w/cpp/named_req/TriviallyCopyable
+
+## Deprecations and removals
+
+- The `VectorSize` helper has been deprecated. The `size()` method of
+ vectors should be called directly instead.
+
+- Drop support for Python 2. Only Python 3 works with Dune 2.7.
+
+- Support for versions older than METIS 5.x and ParMETIS 4.x is deprecated and will be
+ removed after Dune 2.7.
+
+- Deprecated header `dune/common/parallel/collectivecommunication.hh` which will be
+  removed after Dune 2.7. Use `dune/common/parallel/communication.hh` instead!
+
+- Deprecated header `dune/common/parallel/mpicollectivecommunication.hh` which will be
+  removed after Dune 2.7. Use `dune/common/parallel/mpicommunication.hh` instead!
+
+## build-system
+
+- When run with an absolute build directory, `dunecontrol` now exposes the root build
+ directory to CMake in the variable `DUNE_BUILD_DIRECTORY_ROOT_PATH`.
+
+ See core/dune-common!542
+
+- The `dune_symlink_to_sources_files` CMake function now has a `DESTINATION` argument.
+
+- Dune no longer applies architecture flags detected by the Vc library
+ automatically. This applies to all targets that link to Vc explicitly (with
+ `add_dune_vc_flags()`) or implicitly (with `dune_enable_all_packages()`).
+ If you do want to make use of extended architecture features, set the
+ architecture explicitly in the compiler options, e.g. by specifying
+ ```sh
+ CMAKE_FLAGS="-DCMAKE_CXX_FLAGS=-march=native"
+ ```
+ in your opts-file. Vc also sets compiler options to select a particular C++
+ abi (`-fabi-version` and `-fabi-compat-version`), these continue to be
+ applied automatically.
+
+ See core/dune-common!677
+
+- `FindParMETIS.cmake` assumes METIS was found first using `FindMETIS.cmake` and no
+  longer tries to find METIS itself.
+
+- The `inkscape_generate_png_from_svg` CMake function is deprecated and will be removed
+ after 2.7.
+
+- LaTeX documents can now be built using `latexmk` with the help of UseLatexmk.cmake's
+ `add_latex_document`. `dune_add_latex_document` will use the new way of calling
+ LaTeX when the first argument is `SOURCE`. As a side effect, in-source builds are
+ supported, too. The old function call and UseLATEX.cmake are deprecated and will be
+ removed after 2.7.
+
+ See core/dune-common!594
+
+- The build system has learned some new tricks when creating or looking for the Python virtualenv:
+ When using an absolute build directory with `dunecontrol`, the virtualenv will now be placed
+ directly inside the root of the build directory hierarchy in the directory `dune-python-env`.
+ This should make it much easier to actually find the virtualenv and also avoids some corner
+ cases where the build system would create multiple virtualenvs that did not know about each
+ other. This behavior can be disabled by setting
+ `DUNE_PYTHON_EXTERNAL_VIRTUALENV_FOR_ABSOLUTE_BUILDDIR=0`.
+ If you need even more precise control about the location of the virtualenv, you can now also
+ directly set the CMake variable `DUNE_PYTHON_VIRTUALENV_PATH` to the directory in which to
+ create the virtualenv.
+
+# Release 2.6
+
+**This release is dedicated to Elias Pipping (1986-2017).**
+
+- New class `IntegralRange<integral_type>` and free standing function
+ `range` added, providing a feature similar to Python's `range` function:
+ ```
+ for (const auto &i : range(5,10))
+ ```
+ See core/dune-common!325
+
+- `Dune::array` was deprecated, use `std::array` from `<array>` instead.
+  Instead of `Dune::make_array`, use `Dune::Std::make_array`
+  from `dune/common/std/make_array.hh`,
+  and instead of `Dune::fill_array` use `Dune::filledArray`
+  from `dune/common/filledarray.hh`.
+
+ See core/dune-common!359
+
+- The `DUNE_VERSION...` macros are deprecated; use the new macros
+ `DUNE_VERSION_GT`, `DUNE_VERSION_GTE`, `DUNE_VERSION_LTE`, and
+ `DUNE_VERSION_LT` instead.
+
+ See core/dune-common!329
+
+- Added some additional fallback implementations of C++17 features
+  (e.g. `optional`, `conjunction`, `disjunction`).
+
+- `makeVirtualFunction`:
+  allows one to easily convert any function object (e.g. a lambda) to a `VirtualFunction`
+
+ See core/dune-common!282
+
+- Added infrastructure for explicit vectorization *(experimental)*
+
+ We added experimental support for SIMD data types. We currently
+ provide infrastructure to use [Vc](https://github.com/VcDevel/Vc)
+ and some helper functions to transparently switch between scalar data
+ types and SIMD data types.
+
+- `FieldMatrix` now has experimental support for SIMD types from
+ [Vc](https://github.com/VcDevel/Vc) as field types.
+
+ See core/dune-common!121
+
+## build-system
+
+- Variables passed via `dunecontrol`'s command `--configure-opts=..` are now
+ added to the CMake flags.
+
+- Bash-style variables which are passed to `dunecontrol`'s command `configure-opts`
+ are no longer transformed to their equivalent CMake command. Pass
+ `-DCMAKE_C_COMPILER=gcc` instead of `CC=gcc`.
+
+- Added support for modules providing additional Python modules or bindings.
--- /dev/null
+cmake_minimum_required(VERSION 3.13)
+project(dune-common LANGUAGES C CXX)
+
+# make sure our own modules are found
+list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/modules)
+
+# set the script dir for the macros.
+set(DUNE_COMMON_SCRIPT_DIR ${PROJECT_SOURCE_DIR}/cmake/scripts)
+
+# include the dune macros
+include(DuneMacros)
+
+# start a dune project with information from dune.module
+dune_project()
+
+# add subdirectories to execute CMakeLists.txt there
+add_subdirectory(bin)
+add_subdirectory(cmake)
+add_subdirectory(doc)
+add_subdirectory(dune)
+add_subdirectory(lib)
+add_subdirectory(share)
+
+# if Python bindings are enabled, include necessary sub directories.
+if(DUNE_ENABLE_PYTHONBINDINGS)
+ add_subdirectory(python)
+ dune_python_install_package(PATH "python")
+endif()
+
+# finalize the dune project, e.g. generating config.h etc.
+finalize_dune_project()
--- /dev/null
+Contributing to the Dune Core Modules
+=====================================
+
+You've squashed an annoying bug or implemented a nifty new feature in DUNE?
+And you're willing to share your improvements with the community? This page
+explains how to get those changes to us and what to take care of.
+
+Take a look at the DUNE coding style
+------------------------------------
+
+Your work will enjoy much smoother sailing if you take a look at the [Coding
+Style](https://dune-project.org/dev/codingstyle/) and try to stick to it with
+your changes. We understand that everyone has their personal preferences and
+that there is no such thing as the *right* coding style (in the end, it's a
+matter of taste), but DUNE is a pretty large project, and a consistent way of
+doing things really helps a lot when trying to find your way around a body of
+code as big as DUNE.
+
+Make sure to install the Whitespace Hook before starting to work, because
+our repositories enforce certain rules about whitespace and will not accept
+commits that violate those rules. And a developer will be much more motivated
+to merge your patch if doing so does not involve fixing a bunch of tab-based
+indentations that you inadvertently added as part of your changes.
+
+Use Git to your advantage
+-------------------------
+
+We know, Git can be a bit daunting at first, but trust us, it's really worth
+investing half an hour to learn the basics! Even though you don't have any
+commit rights to the DUNE repositories, Git still allows you to create local
+commits on your machine, avoiding the usual ugly business of creating backup
+copies, copying around code in files, commenting and uncommenting variants etc.
+And when you're done and send the changes to us, we can simply import those
+commits into our repositories. That saves a lot of time and when your changes
+can be applied in five minutes using two or three commands, chances are a
+developer will much more easily find the time to do so. Git is really popular,
+so there are tons of tutorials all over the web. Here are some pointers:
+
+* http://try.github.io/ is a very quick, hands-on introduction
+ to Git that allows you to try out Git directly in your browser.
+ Requires a GitHub account to continue at some point, though.
+* http://git-scm.com/book is a very well-written and detailed resource
+ for all things Git. Chapter 2 is a great introduction to Git that also explains
+ a little bit how Git works, which really helps to reduce the number of
+ *WTF just happened?* moments. ;-)
+* http://eagain.net/articles/git-for-computer-scientists/ is a short and
+ sweet explanation of what Git does at a fundamental level - just the thing for
+ scientists! ;-)
+* http://git-scm.com/doc/ext is a collection of both introductory and
+ more in-depth Git resources.
+
+Whatever you do, make sure to set your Git identity so that the commits tell us who authored them!
+
+Getting the changes to us
+-------------------------
+
+You should get your changes to us in the following way:
+* Get an account for [our GitLab instance](http://gitlab.dune-project.org).
+* Fork the core module that you want to contribute to, just
+ as you would do on GitHub.
+* Push your changes to your fork on some branch.
+* Open a merge request using the branch you pushed your changes
+ to as the source branch and the master of the core module repository
+ as the target branch. GitLab will usually ask you about opening
+ a merge request if you browse it right after pushing to some branch.
+* Follow the discussion on the merge request to see what improvements
+ should be done to the branch before merging.
+
+If you have any questions or complaints about this workflow of
+contributing to Dune, please rant on the
+[dune-devel mailing list](mailto:dune-devel@lists.dune-project.org).
--- /dev/null
+LICENSE.md
\ No newline at end of file
--- /dev/null
+Installation Instructions
+=========================
+
+For a full explanation of the DUNE installation process please read
+the installation notes [0]. The following introduction is meant for
+the impatient.
+
+Getting started
+---------------
+
+Suppose you have downloaded all DUNE modules of interest to your
+computer and extracted them in one common directory. See [1] for a
+list of available modules.
+
+To compile the modules, Dune has to check several components of
+your system and whether the prerequisites within the modules are met. To
+ease this for users, we have designed a custom build system on top of CMake.
+Run
+
+ ./dune-common/bin/dunecontrol all
+
+to commence those tests and build all modules you have
+downloaded. Don't worry about messages telling you that libraries are
+missing: they are only needed for grid self-checks used during
+development.
+
+You can customize the build to your specific needs by using an options file
+(see below)
+
+ ./dune-common/bin/dunecontrol --opts=/path_to/file.opts
+
+If you did not tell dunecontrol to install with an options file you
+need to run
+
+ ./dune-common/bin/dunecontrol make install
+
+to install Dune (you may need root permissions for the install
+step, depending on the chosen installation prefix).
+
+A more comprehensive introduction to the build system can be found in [0].
+
+Passing options to the build process
+------------------------------------
+
+Using the dunecontrol script the following atomic commands can be
+executed:
+
+- configure (runs the CMake configuration tests for each module)
+- exec (executes a command in each module source directory)
+- bexec (executes a command in each module build directory)
+- make (builds each module)
+- update (updates each module from its Git or Subversion repository)
+
+The composite command all simply runs configure and make for
+each module.
+
+As it is often not convenient to specify the desired options after
+the dunecontrol call, one can pass the options via a file specified
+by the --opts=<file> option. Specify the options via the variable
+
+ CMAKE_FLAGS=<flags>
+
+An example of an options file is
+
+ # use a special compiler (g++ version 5.0),
+  # install to a custom directory, default is /usr/local,
+ # disable the external library SuperLU,
+ # and use Ninja-build instead of make as the build-tool
+ CMAKE_FLAGS="-DCMAKE_CXX_COMPILER=g++-5 -DCMAKE_INSTALL_PREFIX='/tmp/HuHu' -DCMAKE_DISABLE_FIND_PACKAGE_SuperLU=true -GNinja"
+
+Links
+-----
+
+0. https://www.dune-project.org/doc/installation
+1. https://dune-project.org/releases/
--- /dev/null
+Copyright holders:
+==================
+
+2015--2017 Marco Agnese
+2015 Martin Alkämper
+2003--2019 Peter Bastian
+2004--2020 Markus Blatt
+2013 Andreas Buhr
+2020--2021 Samuel Burbulla
+2011--2020 Ansgar Burchardt
+2004--2005 Adrian Burri
+2014 Benjamin Bykowski (may appear in the logs as "Convex Function")
+2014 Marco Cecchetti
+2018 Matthew Collins
+2006--2021 Andreas Dedner
+2019--2021 Nils-Arne Dreier
+2003 Marc Droske
+2003--2021 Christian Engwer
+2004--2020 Jorrit Fahlke
+2016 Thomas Fetzer
+2008--2017 Bernd Flemisch
+2013--2014 Christoph Gersbacher
+2017--2020 Janick Gerstenberger
+2015 Stefan Girke
+2005--2021 Carsten Gräser
+2015--2017 Felix Gruber
+2010--2021 Christoph Grüninger
+2006 Bernhard Haasdonk
+2015--2018 Claus-Justus Heine
+2015--2020 René Heß
+2017--2019 Stephan Hilb
+2017--2021 Lasse Hinrichsen
+2012--2013 Olaf Ippisch
+2020 Patrick Jaap
+2020 Liam Keegan
+2013--2021 Dominic Kempf
+2009 Leonard Kern
+2017--2018 Daniel Kienle
+2013 Torbjörn Klatt
+2003--2021 Robert Klöfkorn
+2017--2021 Timo Koch
+2005--2007 Sreejith Pulloor Kuttanikkad
+2012--2016 Arne Morten Kvarving
+2010--2014 Andreas Lauser
+2016--2019 Tobias Leibner
+2015 Lars Lubkoll
+2012--2017 Tobias Malkmus
+2007--2011 Sven Marnach
+2010--2017 Rene Milk
+2019--2020 Felix Müller
+2011--2019 Steffen Müthing
+2018 Lisa Julia Nebel
+2003--2006 Thimo Neubauer
+2011 Rebecca Neumann
+2008--2018 Martin Nolte
+2014 Andreas Nüßing
+2004--2005 Mario Ohlberger
+2019--2020 Santiago Ospina De Los Rios
+2014 Steffen Persvold
+2008--2017 Elias Pipping
+2021 Joscha Podlesny
+2011 Dan Popovic
+2017--2021 Simon Praetorius
+2009 Atgeirr Rasmussen
+2017--2020 Lukas Renelt
+2006--2014 Uli Sack
+2003--2020 Oliver Sander
+2006 Klaus Schneider
+2004 Roland Schulz
+2015 Nicolas Schwenck
+2016 Linus Seelinger
+2009--2014 Bård Skaflestad
+2019 Henrik Stolzmann
+2012 Matthias Wohlmuth
+2011--2016 Jonathan Youett
+
+This Licence does not cover the header files taken from the
+[pybind11 project][pybind11] which are included here
+(`dune/python/pybind11`) together with their own [licence file][pybind11Licence].
+
+The DUNE library and headers are licensed under version 2 of the GNU
+General Public License (see below), with a special exception for
+linking and compiling against DUNE, the so-called "runtime exception."
+The license is intended to be similar to the GNU Lesser General
+Public License, which by itself isn't suitable for a template library.
+
+The exact wording of the exception reads as follows:
+
+ As a special exception, you may use the DUNE source files as part
+ of a software library or application without restriction.
+ Specifically, if other files instantiate templates or use macros or
+ inline functions from one or more of the DUNE source files, or you
+ compile one or more of the DUNE source files and link them with
+ other files to produce an executable, this does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License.
+
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
+
+[pybind11]: https://github.com/pybind/pybind11
+[pybind11Licence]: https://github.com/pybind/pybind11/blob/master/LICENSE
--- /dev/null
+DUNE-library
+============
+
+DUNE, the Distributed and Unified Numerics Environment is a modular toolbox
+for solving partial differential equations with grid-based methods.
+
+The main intention is to create slim interfaces allowing an efficient use of
+legacy and/or new libraries. Using C++ techniques DUNE allows one to use very
+different implementations of the same concept (e.g. grid, solver, ...) under
+a common interface with very low overhead.
+
+DUNE was designed with flexibility in mind. It supports easy discretization
+using methods like Finite Elements, Finite Volumes, and Finite
+Differences. Through the separation of data structures, DUNE allows fast linear
+algebra, as provided in the ISTL module, or the use of external libraries
+like BLAS.
+
+This package contains the basic DUNE common classes.
+
+Dependencies
+------------
+
+dune-common depends on the following software packages:
+
+- pkg-config
+- Compiler (C, C++): GNU >=7 or Clang >= 5
+
+  Other compilers might work, too; they need to support C++17 to the extent the
+  ones above do.
+
+The following software is recommended but optional:
+
+- MPI (either OpenMPI, LAM, or MPICH will suffice)
+
+For a full explanation of the DUNE installation process please read
+the [installation notes][installation]. The following introduction is meant for
+the impatient.
+
+License
+-------
+
+The DUNE-library and headers are licensed under version 2 of the GNU
+General Public License, with the so-called "runtime exception", as
+follows:
+
+> As a special exception, you may use the DUNE source files as part
+> of a software library or application without restriction.
+> Specifically, if other files instantiate templates or use macros or
+> inline functions from one or more of the DUNE source files, or you
+> compile one or more of the DUNE source files and link them with
+> other files to produce an executable, this does not by itself cause
+> the resulting executable to be covered by the GNU General Public
+> License. This exception does not however invalidate any other
+> reasons why the executable file might be covered by the GNU General
+> Public License.
+
+This licence clones the one of the libstdc++ library. For further
+implications of this licence please see the libstdc++ [licence page][licence].
+
+See the file COPYING for full copying permissions.
+
+Installation
+------------
+
+Short installation instructions can be found in file INSTALL. For the
+full instructions please see [here][installation].
+
+Links
+-----
+
+0. https://www.dune-project.org/doc/installation
+1. https://dune-project.org/releases/
+2. https://dune-project.org/buildsystem/
+3. http://gcc.gnu.org/onlinedocs/libstdc++/faq.html#faq.license
+
+[installation]: https://www.dune-project.org/doc/installation
+[licence]: http://gcc.gnu.org/onlinedocs/libstdc++/faq.html#faq.license
\ No newline at end of file
--- /dev/null
+Please see the Dune bugtracker at www.dune-project.org for things to do.
+
--- /dev/null
+install(PROGRAMS
+ dune-ctest
+ duneproject
+ dunecontrol
+ dunepackaging.py
+ dune-git-whitespace-hook
+ rmgenerated.py
+ setup-dunepy.py
+ DESTINATION ${CMAKE_INSTALL_BINDIR})
--- /dev/null
+#! /usr/bin/env python3
+#
+# Wrapper around CTest for DUNE
+#
+# CTest returns with an error status not only when tests failed, but also
+# when tests were only skipped. This wrapper checks the log and returns
+# successfully if no tests failed; skipped tests do not result in an error.
+# This behaviour is needed in a continuous integration environment, when
+# building binary packages or in other cases where the testsuite should be
+# run automatically.
+#
+# Moreover, this script also converts the XML test report generated by CTest
+# into a JUnit report file that can be consumed by a lot of reporting
+# software.
+#
+# Author: Ansgar Burchardt <Ansgar.Burchardt@tu-dresden.de>
+# Author: Steffen Müthing <steffen.muething@iwr.uni-heidelberg.de> (for the JUnit part)
+
+import errno
+import glob
+import os.path
+import shutil
+import subprocess
+import sys
+import xml.etree.ElementTree as et
+from pathlib import Path
+import os
+import re
+
+
+class CTestParser:
+
+ def findCTestOutput(self):
+ files = glob.glob("Testing/*/Test.xml")
+ if len(files) != 1:
+            raise Exception("Expected exactly one CTest output file, found {}: {}".format(len(files), ", ".join(files)))
+ return files[0]
+
+ def printTest(self,test,output=None):
+ status = test.get("Status")
+ name = test.find("Name").text
+ fullName = test.find("FullName").text
+        if output is None:
+ output = test.find("Results").find("Measurement").find("Value").text
+
+ print("======================================================================")
+ print("Name: {}".format(name))
+ print("FullName: {}".format(fullName))
+ print("Status: {}".format(status.upper()))
+ if output:
+ print("Output:")
+ for line in output.splitlines():
+ print(" ", line)
+ print()
+
+ def __init__(self,junitpath=None):
+ self.inputpath = self.findCTestOutput()
+ if junitpath is None:
+ if "CI_PROJECT_DIR" in os.environ:
+ buildroot = Path(os.environ["CI_PROJECT_DIR"])
+ # create a slug from the project name
+ name = os.environ["CI_PROJECT_NAME"].lower()
+ name = re.sub(r"[^-a-z0-9]","-",name);
+ junitbasename = "{}-".format(name)
+ else:
+ buildroot = Path.cwd()
+ junitbasename = ""
+ junitdir = buildroot / "junit"
+ junitdir.mkdir(parents=True,exist_ok=True)
+ self.junitpath = junitdir / "{}cmake.xml".format(junitbasename)
+ else:
+ self.junitpath = Path(junitpath)
+            junitdir = self.junitpath.resolve().parent
+ junitdir.mkdir(parents=True,exist_ok=True)
+ self.tests = 0
+ self.passed = 0
+ self.failures = 0
+ self.skipped = 0
+ self.errors = 0
+ self.time = 0.0
+
+ def createJUnitSkeleton(self):
+ self.testsuites = et.Element("testsuites")
+ self.testsuite = et.SubElement(self.testsuites,"testsuite")
+ self.properties = et.SubElement(self.testsuite,"properties")
+
+ def fillJUnitStatistics(self):
+ self.testsuite.set("name","cmake")
+ self.testsuite.set("tests",str(self.tests))
+ self.testsuite.set("disabled","0")
+ self.testsuite.set("errors",str(self.errors))
+ self.testsuite.set("failures",str(self.failures))
+ self.testsuite.set("skipped",str(self.skipped))
+ self.testsuite.set("time",str(self.time))
+
+ def processTest(self,test):
+ testcase = et.SubElement(self.testsuite,"testcase")
+ testcase.set("name",test.find("Name").text)
+ testcase.set("assertions","1")
+ testcase.set("classname","cmake")
+ time = test.find("./Results/NamedMeasurement[@name='Execution Time']/Value")
+ if time is not None:
+ self.time += float(time.text)
+ testcase.set("time",time.text)
+ self.tests += 1
+ outcome = test.get("Status")
+ if outcome == "passed":
+ testcase.set("status","passed")
+ self.passed += 1
+ elif outcome == "failed":
+ self.failures += 1
+ testcase.set("status","failure")
+ failure = et.SubElement(testcase,"failure")
+ failure.set("message","program execution failed")
+ failure.text = test.find("./Results/Measurement/Value").text
+ self.printTest(test)
+ elif outcome == "notrun":
+            # This does not exist on older CMake versions, so work around that
+ try:
+ status = test.find("./Results/NamedMeasurement[@name='Completion Status']/Value").text
+ if status == "SKIP_RETURN_CODE=77":
+ self.skipped += 1
+ et.SubElement(testcase,"skipped")
+ elif status == "Required Files Missing":
+ self.errors += 1
+ error = et.SubElement(testcase,"error")
+ error.set("message","compilation failed")
+ error.set("type","compilation error")
+ self.printTest(test,output="Compilation error")
+ else:
+ error = et.SubElement(testcase,"error")
+ error.set("message","unknown error during test execution")
+ error.set("type","unknown")
+ error.text = test.find("./Results/Measurement/Value").text
+ self.errors += 1
+ self.printTest(test)
+ except AttributeError:
+ output_tag = test.find("./Results/Measurement/Value")
+ if output_tag is not None:
+ msg = output_tag.text
+ if "skipped" in msg:
+ self.skipped += 1
+ et.SubElement(testcase,"skipped")
+ elif "Unable to find required file" in msg:
+ self.errors += 1
+ error = et.SubElement(testcase,"error")
+ error.set("message","compilation failed")
+ error.set("type","compilation error")
+ self.printTest(test,output="Compilation error")
+ else:
+ error = et.SubElement(testcase,"error")
+ error.set("message","unknown error during test execution")
+ error.set("type","unknown")
+ error.text = msg
+ self.errors += 1
+ self.printTest(test)
+ else:
+ error = et.SubElement(testcase,"error")
+ error.set("message","unknown error during test execution")
+ error.set("type","unknown")
+ error.text = "no message"
+ self.errors += 1
+ self.printTest(test)
+
+ output_tag = test.find("./Results/Measurement/Value")
+ if output_tag is not None:
+ out = et.SubElement(testcase,"system-out")
+ out.text = output_tag.text
+
+ def process(self):
+
+ with open(self.inputpath, "r", encoding="utf-8") as fh:
+ tree = et.parse(fh)
+
+ root = tree.getroot()
+
+ self.createJUnitSkeleton()
+
+ for test in root.findall(".//Testing/Test"):
+ self.processTest(test)
+
+ self.fillJUnitStatistics()
+
+ with self.junitpath.open("wb") as fh:
+ fh.write(et.tostring(self.testsuites,encoding="utf-8"))
+ print("JUnit report for CTest results written to {}".format(self.junitpath))
+
+ return self.errors + self.failures
+
+
+def runCTest(argv=[]):
+ cmd = ["ctest",
+ "--output-on-failure",
+ "--dashboard", "ExperimentalTest",
+ "--no-compress-output",
+ ]
+ cmd.extend(argv)
+ subprocess.call(cmd)
+
+def checkDirectory():
+ if not os.path.exists("CMakeCache.txt"):
+ raise Exception("ERROR: dune-ctest must be run in a cmake build directory")
+
+def removeCTestOutput():
+ try:
+ shutil.rmtree("Testing")
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+def main():
+ try:
+ checkDirectory()
+ removeCTestOutput()
+ runCTest(argv=sys.argv[1:])
+ parser = CTestParser()
+ errors = parser.process()
+ status = 0 if errors == 0 else 1
+ sys.exit(status)
+ except Exception as e:
+ print("Internal error: {}".format(e))
+ sys.exit(127)
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+#!/bin/sh
+# dune-git-whitespace-hook
+# DO NOT TOUCH THE PRECEDING LINE
+# It is used by dunecontrol to enable automatic updates of the whitespace hook
+
+# DUNE pre-commit hook to enforce whitespace policy
+# This hook prevents adding lines with trailing whitespace and/or tab characters
+# in line indentation for certain files (see the TRAILING_WHITESPACE_DEFAULT and
+# TAB_IN_INDENT_DEFAULT variables below for the default sets of files that will
+# be checked).
+# You can tell the hook which files should be inspected by setting the Git
+# configuration variables "hooks.whitespace.trailing" and "hooks.whitespace.tabinindent".
+# Those variables should contain valid Perl regular expressions. The names of modified
+# files will be matched against those regexes.
+
+# git-diff-index needs a valid commit to compare to
+if git rev-parse --verify HEAD >/dev/null 2>&1
+then
+ against=HEAD
+else
+ # Initial commit: diff against an empty tree object
+ against=4b825dc642cb6eb9a060e54bf8d69288fbee4904
+fi
+
+
+# By default, we disallow trailing whitespace for the following files, but the check for C/C++ and CMake sources
+# happens in the tab-in-indent check to avoid confusing users with duplicate error messages
+TRAILING_WHITESPACE_DEFAULT='^(dune\.module|README|README\.SVN|COPYING|INSTALL|TODO)$|^[^/]*(\.md|\.pc\.in)$|^doc/.*\.md$'
+
+# By default, we disallow tabs in indents and trailing whitespace in C/C++ and CMake source files
+TAB_IN_INDENT_DEFAULT='(^|/)CMakeLists\.txt$|(\.cpp|\.hpp|\.cc|\.hh|\.c|\.h|\.cmake|\.sh|\.py)$'
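+# For illustration only: with the defaults above, files like "dune.module" or
+# "doc/installation.md" are checked for trailing whitespace, while e.g.
+# "CMakeLists.txt" or "dune/common/fvector.hh" are checked for trailing
+# whitespace and for tabs in the indentation.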
+
+# Get user preferences
+TRAILING_WHITESPACE_FILES=$(git config hooks.whitespace.trailing)
+
+# Set default regex for disallowing trailing whitespace if the user did not set anything.
+# We need to check the return value of git-config to distinguish the case
+# when the user set an empty value
+if [ $? -ne 0 ];
+then
+ TRAILING_WHITESPACE_FILES="$TRAILING_WHITESPACE_DEFAULT"
+fi
+
+
+TAB_IN_INDENT_FILES=$(git config hooks.whitespace.tabinindent)
+
+# Set default regex for disallowing tabs if the user did not set anything.
+# We need to check the return value of git-config to distinguish the case
+# when the user set an empty value
+if [ $? -ne 0 ];
+then
+ TAB_IN_INDENT_FILES="$TAB_IN_INDENT_DEFAULT"
+fi
+
+
+# Unfortunately, we have to mess directly with the repository config file,
+# as git won't honor a custom config file specified by GIT_CONFIG
+
+# backup repository-local user setting for core.whitespace
+USER_WHITESPACE=$(git config --local --get core.whitespace)
+if [ $? -ne 0 ];
+then
+ USER_HAS_CUSTOM_WHITESPACE=0
+else
+ USER_HAS_CUSTOM_WHITESPACE=1
+fi
+
+# Figure out how to call xargs to make sure it won't invoke its argument with
+# an empty argument list. BSD xargs will not do that by default, while GNU xargs
+# needs -r to do the same. So we start by checking whether xargs does the right
+# thing without options. Now there could be other obscure versions of xargs out
+# there (on clusters etc.) that behave in yet another way, so we try with -r as
+# well. If that fails, we throw a big error message at the user.
+
+# In the following line, xargs should not call false, so the return value should be 0.
+echo "" | xargs false
+
+if [ $? -ne 0 ]; then
+ # Let's try with -r
+ echo "" | xargs -r false
+ if [ $? -ne 0 ]; then
+ # Houston, we have a problem
+ if [ -z "$DUNE_WHITESPACE_IGNORE_XARGS" ]; then
+      echo "You seem to be lacking a version of xargs that is compatible with either BSD or GNU!" 1>&2
+ echo "Please file a bug report at http://dune-project.org about this issue with your exact operating system type and version!" 1>&2
+ echo "You can still use this hook by setting the environment variable DUNE_WHITESPACE_IGNORE_XARGS to 1, but please be aware" 1>&2
+ echo "that the hook might create false positives." 1>&2
+ echo "==============================================================" 1>&2
+ echo "Aborting the commit..." 1>&2
+ exit 99
+ else
+ SILENTXARGS=xargs
+ fi
+ else
+ SILENTXARGS="xargs -r"
+ fi
+else
+ SILENTXARGS=xargs
+fi
+
+
+fail=0
+done=0
+
+do_cleanup()
+{
+
+ if [ $done -ne 1 ];
+ then
+ echo "Error while executing whitespace checking pre-commit hook!" 1>&2
+ echo "There might still be whitespace errors in your commit!" 1>&2
+ fi
+
+ if [ $USER_HAS_CUSTOM_WHITESPACE -eq 1 ];
+ then
+ git config --replace-all core.whitespace "$USER_WHITESPACE"
+ else
+ git config --unset core.whitespace
+ fi
+
+ # be nice and let the commit go through if something went wrong along the
+ # way and we did not record a failure
+ exit $fail
+}
+
+trap do_cleanup EXIT
+
+# set custom value
+git config --replace-all core.whitespace trailing-space
+
+if [ -z "$TRAILING_WHITESPACE_FILES" ];
+then
+ git diff-index --check --cached $against --
+ result=$?
+else
+ export TRAILING_WHITESPACE_FILES
+ git diff-index --cached --name-only $against \
+ | perl -ne 'print if /$ENV{TRAILING_WHITESPACE_FILES}/' \
+ | $SILENTXARGS git diff-index --check --cached $against --
+ result=$?
+fi
+
+if [ $result -ne 0 ];
+then
+ fail=1
+fi
+
+git config --replace-all core.whitespace trailing-space,tab-in-indent
+
+if [ -z "$TAB_IN_INDENT_FILES" ];
+then
+ git diff-index --check --cached $against --
+ result=$?
+else
+ export TAB_IN_INDENT_FILES
+ git diff-index --cached --name-only $against \
+ | perl -ne 'print if /$ENV{TAB_IN_INDENT_FILES}/' \
+ | $SILENTXARGS git diff-index --check --cached $against --
+ result=$?
+fi
+
+if [ $result -ne 0 ];
+then
+ fail=1
+fi
+
+done=1
+
+# trap will call the cleanup code
--- /dev/null
+#!/usr/bin/env bash
+
+set -e
+
+###############################################
+###
+### check for environment variables
+###
+if test -z "$GREP"; then
+ GREP=grep
+fi
+if test -z "$SED"; then
+ SED=sed
+fi
+
+if test -z "$MAKE"; then
+ MAKE=make
+fi
+
+system_default_cmake="no"
+if test -z "$CMAKE"; then
+ CMAKE=cmake
+ system_default_cmake="yes"
+fi
+
+space=" "
+tab=" "
+BLANK="$space$tab"
+nl=$'\n'
+
+###############################################
+###
+### read lib
+###
+
+# quote parameter for passing it through the shell
+# the result is meant to be used inside "...", i.e. the outer quotes are _not_
+# included
+#
+# you should have the identity
+# eval "[ x\"\$param\" = x\"$(doublequote "$param")\" ]"
+doublequote()
+{
+ local val="$1"
+ local result=
+ local token=
+ local special=
+ while [ -n "$val" ]; do
+ token=${val%%[\"\$\`\\]*}
+ val=${val#"$token"}
+ special=${val:0:1}
+ val=${val:1}
+ result=$result$token${special:+\\$special}
+ done
+ # if the value ends in \n, protect it by appending two double-quotes so it
+  # won't get stripped by the surrounding $(...)
+ case $result in
+ *"$nl") result=$result'""';;
+ esac
+ printf "%s" "$result"
+}
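+# Illustrative example (assumed shell session, not executed here):
+#   param='say "$HOME"'
+#   doublequote "$param"    # prints: say \"\$HOME\"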
+
+canonicalname(){
+ if test $# -ne 1; then
+ echo Usage: canonicalname path >&2
+ return 1
+ fi
+ file="`eval echo $1`" # expand ~
+ if test ! -e "$file"; then
+ echo $file: file not found >&2
+ return 1
+ fi
+ # if this is a symlink, then follow the symlink
+ if test -L "$file"; then
+ fdir="`dirname \"$file\"`"
+ flink="`readlink \"$file\"`"
+ if test -e "$flink"; then
+ # these are absolute links, or links in the CWD
+ canonicalname "$flink"
+ else
+ canonicalname "$fdir/$flink"
+ fi
+ else
+ # if this is a file, then remember the filename and
+ # canonicalize the directory name
+ if test -f "$file"; then
+ fdir="`dirname \"$file\"`"
+ fname="`basename \"$file\"`"
+ fdir="`canonicalname \"$fdir\"`"
+ echo "$fdir/$fname"
+ fi
+ # if this is a directory, then create an absolute
+ # directory name and we are done
+ if test -d "$file"; then
+ (cd "$file"; pwd)
+ fi
+ fi
+}
+
+canonicalpath(){
+ if test $# -ne 1; then
+ echo Usage: canonicalpath path >&2
+ return 1
+ fi
+ dirname "`canonicalname "$1"`"
+}
+
+checkdebug () {
+ while test $# -gt 0; do
+ if test x$1 = x--debug; then
+ echo yes
+ return
+ fi
+ shift
+ done
+ echo no
+}
+
+DEBUG=`checkdebug $@`
+if test "x$DEBUG" = "xyes"; then
+ set -x
+ set -v
+fi
+
+
+onbuildfailure() {
+ echo "Terminating $(basename "$0") due to previous errors!" >&2
+ exit 1
+}
+
+#
+# load the $CONTROL script part of a module and run the requested commands
+#
+# parameters:
+# $1 module
+# $2-$* commands + parameters to execute
+#
+build_module() {
+ local module=$1
+ shift
+ while test $# -gt 0; do
+ # get command
+ command=$1
+ shift
+
+ # only load other parameters
+ load_opts NONE
+ # get command options
+ CMD_FLAGS=
+ while test $# -gt 0 && test "$1" != ":"; do
+ COMMAND=$(echo $command | tr '[:lower:]' '[:upper:]')
+ # setup parameter list
+ CMD_FLAGS="$CMD_FLAGS \"$(doublequote "$1")\""
+ shift
+ done
+ if test -z "$CMD_FLAGS"; then
+ load_opts $command
+ else
+ # disable usage of opts file
+ if test "x$DUNE_OPTS_FILE" != "x"; then
+        echo "WARNING: command line parameters will override the settings in the opts file \"$DUNE_OPTS_FILE\""
+ fi
+ fi
+
+ # skip command delimiter
+ if test "$1" = ":"; then shift; fi
+
+ # actually run the commands (we already know that these are valid commands)
+ local runcommand=run_$command
+
+ # build the modules
+ local path="$(eval "echo \$PATH_${module}")"
+ eval echo "--- calling $command for \$NAME_${module} ---"
+ trap onbuildfailure EXIT
+ if ! (
+ set -e
+ cd "$path"
+ export module
+ export ABS_BUILDDIR=$(abs_builddir $module $BUILDDIR)
+ eval_control $runcommand "$path/$CONTROL"
+ ); then eval echo "--- Failed to build \$NAME_${module} ---"; exit 1; fi
+ trap onfailure EXIT
+
+ eval echo "--- \$NAME_${module} done ---"
+ done
+}
+
+#
+# load command options from an opts file
+# the name of the opts file is stored in the global variable $DUNE_OPTS_FILE
+#
+# parameters:
+# $1 command
+#
+load_opts() {
+ local command=$1
+ local COMMAND=$(echo $command | tr '[:lower:]' '[:upper:]')
+ CMD_FLAGS=$(eval echo \$${COMMAND}_FLAGS)
+ local CMD_FLAGS_FROM_FILE=""
+ if test "$command" = "NONE"; then
+ BUILDDIR=$DUNE_BUILDDIR
+ if test "x$DUNE_OPTS_FILE" != "x"; then
+ if test -z "$BUILDDIR"; then
+ # no builddir set yet, use build dir from opts file if set
+        # Note: if --builddir is used, BUILDDIR will be set already
+ OPTS_FILE_BUILDDIR="$(eval BUILDDIR=""; . $DUNE_OPTS_FILE; eval echo \$BUILDDIR)"
+ if test -n "$OPTS_FILE_BUILDDIR"; then
+ BUILDDIR="$OPTS_FILE_BUILDDIR"
+ fi
+ fi
+ if test "$system_default_cmake" = "yes"; then
+ # We use cmake for building, but CMAKE is not yet set.
+ # Check the opts file for it
+ OPTS_FILE_CMAKE="$(eval CMAKE=""; . $DUNE_OPTS_FILE; eval echo \$CMAKE)"
+ if test -n "$OPTS_FILE_CMAKE"; then
+ CMAKE="$OPTS_FILE_CMAKE"
+ fi
+ fi
+ fi
+ fi
+ if test "x$DUNE_OPTS_FILE" != "x"; then
+ if test "$command" = "configure"; then
+ CMAKE_FLAGS="$(. $DUNE_OPTS_FILE; eval echo \$CMAKE_FLAGS)"
+ CMAKE_MODULE_PATH="$(. $DUNE_OPTS_FILE; eval echo \$CMAKE_MODULE_PATH)"
+ fi
+ CMD_FLAGS_FROM_FILE="$(eval ${COMMAND}_FLAGS=""; . $DUNE_OPTS_FILE; eval echo \$${COMMAND}_FLAGS)"
+ fi
+ if test -n "$CMD_FLAGS_FROM_FILE"; then
+ echo "----- using default flags \$${COMMAND}_FLAGS from $DUNE_OPTS_FILE -----"
+ CMD_FLAGS=$CMD_FLAGS_FROM_FILE
+ elif test -n "$CMD_FLAGS"; then
+ echo "----- using default flags \$${COMMAND}_FLAGS from environment -----"
+ fi
+
+ # if no build directory is set, use default "build-cmake"
+ if test -z "$BUILDDIR"; then
+ export BUILDDIR=build-cmake
+ fi
+}
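+# Illustrative example of an opts file read by load_opts (assumed content,
+# pass it via --opts=FILE):
+#   BUILDDIR=build-clang
+#   CMAKE_FLAGS="-DCMAKE_CXX_COMPILER=clang++ -DCMAKE_BUILD_TYPE=Release"
+#   MAKE_FLAGS="-j4"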
+
+abs_builddir()
+{
+ local m=$1
+ local builddir=$2
+ local name="$(eval echo \$NAME_$m)"
+ local path="$(eval echo \$PATH_$m)"
+ case $BUILDDIR in
+ /*)
+ echo $builddir/$name
+ ;;
+ *)
+ echo $path/$builddir
+ ;;
+ esac
+}
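+# Example (illustrative): with BUILDDIR=build-cmake a module located at
+# /src/dune-istl builds in /src/dune-istl/build-cmake, while an absolute
+# BUILDDIR=/tmp/build yields /tmp/build/dune-istl.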
+
+# Uses the current compiler to extract the multiarch triplet and sets the
+# exported variable MULTIARCH_LIBDIR accordingly.
+# If no compiler is specified, cc or gcc is used.
+extract_multiarch(){
+ set +e #error in the multiarch detection should not be fatal.
+ local my_cxx_compiler
+ if test "x$MULTIARCH_LIBDIR" != "x"; then
+ return
+ fi
+ load_opts "cmake"
+ if test "x$my_cxx_compiler" == "x"; then
+ load_opts "configure"
+ fi
+ my_cxx_compiler=`echo $CMD_FLAGS | $GREP CXX | $SED "s/.*CXX=[\"']\{0,1\}\([^$BLANK'\"]*\)[\"']\{0,1\}.*/\1/"`
+ if test "x$my_cxx_compiler" == "x"; then
+ $(which cc &>/dev/null)
+ if test $? -eq "0"; then
+ my_cxx_compiler=cc
+ else
+ my_cxx_compiler=gcc
+ fi
+ fi
+ multiarch=$($my_cxx_compiler --print-multiarch 2>/dev/null)
+ if test $? -gt 0; then
+ for i in "target=" "Target:"; do
+ multiarch=$($my_cxx_compiler -v 2>&1| $GREP "$i" | $SED "s/.*$i[$BLANK]*\([a-z0-9_-]*\)/\1/" | $SED "s/-[a-z]*-linux-gnu/-linux-gnu/")
+ if test -n "$multiarch"; then break; fi
+ done
+ fi
+ set -e # set to old value.
+ export MULTIARCH_LIBDIR="lib/$multiarch"
+}
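+# Example (illustrative): on a Debian-like x86_64 system
+# "g++ --print-multiarch" typically prints "x86_64-linux-gnu", so
+# MULTIARCH_LIBDIR becomes "lib/x86_64-linux-gnu".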
+
+export PREFIX_DIR="`canonicalpath "$0"`/.."
+
+# Read the modules find part
+. "$PREFIX_DIR/lib/dunemodules.lib"
+
+###############################################
+
+
+###############################################
+###
+### Commands
+###
+
+# check all parameters
+check_commands() {
+ while test $# -gt 0; do
+ # get command
+ command=$1
+ shift
+ # skip command options
+ while test $# -gt 0 && test "$1" != ":"; do
+ shift
+ done
+ # skip command delimiter
+ if test "$1" = ":"; then shift; fi
+ # test the commands
+ if ! is_command $command; then
+ usage
+ echo "ERROR: unknown command \"$command\"" >&2
+ exit 1
+ fi
+ done
+}
+
+# check whether the parameter is a valid command
+is_command() {
+eval '
+case "$1" in
+ '`echo $COMMANDS | $SED -e 's/ / | /g'`')
+ return 0
+ ;;
+ *)
+ return 1
+ ;;
+esac'
+}
+
+# list of all dunecontrol commands
+COMMANDS="printdeps vcsetup update cmake configure make all exec bexec status svn git"
+
+# list of dunecontrol commands for which the version check is skipped by default
+COMMANDSTOSKIPVERSIONCHECK="update status svn git exec bexec"
+
+# help string for the commands
+printdeps_HELP="print recursive dependencies of a module"
+vcsetup_HELP="setup version control repository (Git etc.) or working copy (SVN)"
+update_HELP="update all modules from their repositories"
+cmake_HELP="run cmake for each module"
+configure_HELP="${cmake_HELP}"
+make_HELP="build each module"
+all_HELP="\trun the 'vcsetup', 'configure' and 'make' commands for each module"
+exec_HELP="execute an arbitrary command in each module source directory"
+bexec_HELP="execute an arbitrary command in each module build directory"
+status_HELP="show vc status for all modules"
+svn_HELP="\trun svn command for each svn managed module"
+git_HELP="\trun git command for each git managed module"
+
+#
+# setup command proxies
+# call will be forwarded to run_default_$command
+#
+
+for command in $COMMANDS; do
+ eval "run_$command () { run_default_$command; }"
+done
+
+#
+# default implementations for commands...
+# these can be overwritten in the $CONTROL files
+#
+
+run_default_exec () { bash -c "eval $CMD_FLAGS"; }
+
+run_default_bexec () {
+ if test -d "$ABS_BUILDDIR"; then
+ bash -c "cd \"$ABS_BUILDDIR\" && eval $CMD_FLAGS";
+ else
+ eval echo "Build directory \\\"$ABS_BUILDDIR\\\" not found, skipping bexec for \$NAME_${module}"
+ fi
+}
+
+run_default_status () {
+ local verbose=0
+ local update=""
+ local is_git=""
+ local is_svn=""
+ name="$(eval echo \$NAME_$module)"
+
+ if test -e .git; then is_git=1; fi
+ if test -d .svn; then is_svn=1; fi
+ if test ! "$is_svn" -a ! "$is_git" ; then
+ echo "module $name not under known version control"
+ return
+ fi
+
+ for i in $CMD_FLAGS; do
+ if eval test "x$i" = "x-v"; then verbose=1; fi
+ if eval test "x$i" = "x-vv"; then verbose=2; fi
+ if eval test "x$i" = "x-u"; then update="-u"; fi
+ done
+  # is our output connected to a tty?
+ if test -t 1; then
+ blue="\e[1m\e[34m"
+ green="\e[1m\e[32m"
+ red="\e[1m\e[31m"
+ reset="\e[0m\e[0m"
+ fi
+
+ if test $verbose -eq 1; then
+ test "$is_svn" && svn status $update | $GREP -E "^M|^A|^D|^C|^U"
+ test "$is_git" && git status -uno
+ elif test $verbose -eq 2; then
+ test "$is_svn" && svn status $update
+ test "$is_git" && git status
+ fi
+
+
+ if test "$is_svn" ; then
+ changed=$(svn status | $GREP -E "^M|^A|^D" | wc -l)
+ collisions=$(svn status | $GREP -E "^C"| wc -l)
+ pending=$(svn status $update | $GREP -E "^...... \* " | wc -l)
+ fi
+ if test "$is_git" ; then
+ changed=$(git status --porcelain | $GREP -E "^ *M|^ *A|^ *D|^ *R|^ *C" | wc -l)
+ collisions=$(git status --porcelain | $GREP -E "^ *U"| wc -l)
+ pending=$(git status | $GREP -E "^\# Your branch is ahead |^\# Your branch is behind " | wc -l)
+ fi
+ color=$green
+ text="no changes"
+ if [ $changed -eq 0 ]; then
+ true
+ elif [ $changed -eq 1 ]; then
+ color=$blue;
+ text="1 change"
+ else
+ color=$blue;
+ text="$changed changes"
+ fi
+ if [ $pending -eq 0 ]; then
+ true
+ elif [ $pending -eq 1 ]; then
+ color=$blue;
+ text="$text, 1 update pending"
+ else
+ color=$blue;
+ text="$text, $pending updates pending"
+ fi
+ if [ $collisions -eq 0 ]; then
+ true
+ elif [ $collisions -eq 1 ]; then
+ color=$red
+ text="$text, 1 collision"
+ else
+ color=$red
+    text="$text, $collisions collisions"
+ fi
+ echo -e "$color[$text]$reset $name"
+}
+
+run_default_vcsetup() {
+ # load user options
+ if [ -n "$CMD_FLAGS" ]; then
+ eval "$CMD_FLAGS"
+ fi
+
+ # Check for both a file and a directory to cope with Git submodules
+ if [ -e .git ] ; then
+
+ # Read Whitespace-Hook setting from dune.module file
+ local SETUPGITHOOK="$($GREP -i "^[$BLANK]*Whitespace-Hook:" dune.module | cut -d ':' -f2 | eval $PARSER_TRIM | tr '[:upper:]' '[:lower:]')"
+
+ if [ "x$SETUPGITHOOK" = "xyes" ]; then
+ # we have to install the Git whitespace hook
+
+ # There are several options as to what the current worktree might be backed by right now:
+ # - a plain old repository -> copy to .git/hooks
+ # - a submodule repository -> .git refers to the submodule repository inside the root $GIT_DIR
+ # - a git repo worktree -> .git refers to the worktree data inside the original $GIT_DIR and we need to find
+ # the "canonical" $GIT_DIR root
+ # - a submodule in a worktree -> That just gets checked out again into the worktree backing store, so no worktree
+ # dereferencing here
+
+      # We first try git's built-in infrastructure; if the user's git is too old, we fall back to
+      # manual parsing
+
+ GITHOOKPATH="$(git rev-parse --git-common-dir)"
+ if [ $? -ne 0 -o "${GITHOOKPATH}" = "--git-common-dir" ] ; then
+
+ # no worktree support from here on out
+
+ GITHOOKPATH="$(git rev-parse --git-dir)"
+
+ if [ $? -ne 0 -o "${GITHOOKPATH}" = "--git-dir" ] ; then
+
+ # do the parsing manually
+
+ if [ -f .git ] ; then
+ # submodule -> .git contains a pointer to the repository
+ GITHOOKPATH="$($SED 's/gitdir: //' < .git)"
+ else
+ # standard case, .git is the repository
+ GITHOOKPATH=.git
+ fi
+ fi
+ fi
+ GITHOOKPATH="${GITHOOKPATH}/hooks/pre-commit"
+
+ if [ -n "$DISABLEWHITESPACEHOOK" ] ; then
+      # the user doesn't want the Git whitespace hook - uninstall it if necessary and warn the user
+ echo "WARNING: The current module wants to install the DUNE whitespace hook, but you have disabled the hook in your options!"
+ echo "WARNING: You will have to make sure that your commits don't introduce any trailing whitespace or indentation with tabs!"
+ echo "WARNING: Otherwise, your commits might be rejected when trying to push them to an official repository!"
+
+ if [ -e "$GITHOOKPATH" ]; then
+ # there is a pre-commit hook, check whether it is our whitespace hook
+ local HOOKTAG="$(eval head -n 2 \"$GITHOOKPATH\" | tail -n 1)"
+ if [ "x$HOOKTAG" = "x# dune-git-whitespace-hook" ]; then
+ echo "--> Removing DUNE whitespace hook as requested by the user"
+ rm "$GITHOOKPATH"
+ fi
+ fi
+ else
+ # standard handling of Git whitespace hook
+ for f in dune-git-whitespace-hook git-whitespace-hook; do
+ f="${PREFIX_DIR}/bin/${f}"
+ if [ -e "${f}" ]; then
+ git_whitespace_hook="${f}"
+ break
+ fi
+ done
+ if [ -z "${git_whitespace_hook:-}" ]; then
+ echo "Did not find git-whitespace-hook." >&2
+ exit 1
+ fi
+ if [ ! -e "$GITHOOKPATH" ]; then
+ # there is no hook yet, we can safely install ours
+ echo "--> Installing Git pre-commit hook to enforce whitespace policy"
+ cp -p "${git_whitespace_hook}" "$GITHOOKPATH"
+ else
+ # there is already a hook, check whether it is our whitespace hook
+ local HOOKTAG="$(eval head -n 2 \"$GITHOOKPATH\" | tail -n 1)"
+ if [ "x$HOOKTAG" = "x# dune-git-whitespace-hook" ]; then
+ if [ "${git_whitespace_hook}" -nt "$GITHOOKPATH" ]; then
+ echo "--> Updating Git pre-commit hook with newer version"
+ cp -p "${git_whitespace_hook}" "$GITHOOKPATH"
+ fi
+ else
+ echo "WARNING: Existing pre-commit hook found!"
+ echo "WARNING: Skipping installation of DUNE whitespace hook!"
+ echo "WARNING: If you want to contribute patches to DUNE, you should make sure to call the whitespace hook"
+        echo "WARNING: (dune-common/bin/git-whitespace-hook) from your custom pre-commit hook, otherwise your commits"
+ echo "WARNING: might contain trailing whitespace and will not apply cleanly to the official repositories!"
+ fi
+ fi
+ fi
+ fi
+
+ # Apply git configuration settings
+ if [ -f .vcsetup/config ]; then
+ echo -n "--> Setting Git configuration entries... "
+ cat .vcsetup/config | while read; do
+ # Filter out comments
+ local COMMENT="$(echo $REPLY | $GREP '^#')"
+ if [ ! "x$COMMENT" = "x$REPLY" ]; then
+ # parse line into an array first to catch obvious syntax errors
+ # like 'option value; rm -rf /'
+ eval local GIT_ARGS=($REPLY)
+ git config "${GIT_ARGS[@]}"
+ fi
+ done
+ echo "done"
+ fi
+
+ # Apply user supplied configuration settings
+ if [ -n "$GIT_CONFIG_FILE" ]; then
+ if [ -f "$GIT_CONFIG_FILE" ]; then
+ echo -n "--> Setting custom Git configuration entries from '$GIT_CONFIG_FILE'... "
+ cat "$GIT_CONFIG_FILE" | while read; do
+ # Filter out comments
+ local COMMENT="$(echo $REPLY | $GREP '^#')"
+ if [ ! "x$COMMENT" = "x$REPLY" ]; then
+ # parse line into an array first to catch obvious syntax errors
+ # like 'option value; rm -rf /'
+ eval local GIT_ARGS=($REPLY)
+ git config "${GIT_ARGS[@]}"
+ fi
+ done
+ echo "done"
+ else
+ echo "WARNING: custom Git config file '$GIT_CONFIG_FILE' not found!"
+ fi
+ fi
+
+ fi
+
+ # Run custom setup scripts
+ if [ -e .git -o -d .svn ]; then
+ if [ -d .vcsetup/run.d ]; then
+ for SCRIPT in .vcsetup/run.d/* ; do
+ if [ -x "$SCRIPT" ]; then
+ echo "--> Running $SCRIPT"
+ "$SCRIPT"
+ fi
+ done
+ fi
+ fi
+}
+
+run_default_update () {
+ if test -d .svn; then
+ svn update
+ elif test -e .git; then
+ if test -d .git && test -d ".git/svn" && test -n "`git svn find-rev HEAD`"; then
+ # If the current HEAD points to a SVN commit, update via git-svn
+ git svn rebase
+ else
+ # Update all remotes (if any)
+ git remote update
+
+ # merge all changes fast-forward style if possible
+ if ! git merge --ff-only FETCH_HEAD 2> /dev/null; then
+ eval echo "\$NAME_${module} seems to be using git, and could not be"
+ echo "updated automatically. Please update it manually."
+ echo "(Usually, this is done via 'git svn rebase' for modules using"
+ echo "subversion or 'git merge' for modules which use git natively."
+ echo "Conflicts can be resolved using 'git mergetool'.)"
+ fi
+ fi
+ else
+ eval echo "WARNING: \$NAME_${module} is not under a known version control system."
+ echo " We support svn and git."
+ fi
+}
+
+run_default_cmake () {
+ extract_multiarch
+
+ # tell CMake about the build directory root when we are using an absolute build directory
+ if [[ ${BUILDDIR} = /* ]] ; then
+ CMAKE_PARAMS="$CMAKE_PARAMS -DDUNE_BUILD_DIRECTORY_ROOT_PATH='${BUILDDIR}'"
+ fi
+
+ # add arguments given as configure-opts to CMAKE_params
+ CMAKE_PARAMS="$CMAKE_PARAMS $CMD_FLAGS"
+
+ # get dependencies & suggestions
+ sort_modules $module
+ for m in $MODULES; do
+ path="$(eval "echo \$PATH_$m")"
+
+ # add other module's build dir to path
+ if [ $module != $m ] ; then
+ name=$(eval "echo \$NAME_$m")
+ local m_ABS_BUILDDIR=$(abs_builddir $m $BUILDDIR)
+
+ if test -d "$m_ABS_BUILDDIR"; then
+ CMAKE_PARAMS="$CMAKE_PARAMS \"-D""$name""_DIR=$m_ABS_BUILDDIR\""
+ else
+ TMP_PARAMS="\"-D""$name""_DIR=$path\""
+ for i in $MULTIARCH_LIBDIR lib lib64 lib32; do
+ if test -d "$path/$i/cmake/$name"; then
+ TMP_PARAMS="\"-D""$name""_DIR=$path/$i/cmake/$name\""
+ break;
+ fi
+ done
+ CMAKE_PARAMS="$CMAKE_PARAMS $TMP_PARAMS"
+ fi
+ fi
+ done
+ # create build directory if requested
+ test -d "$ABS_BUILDDIR" || mkdir -p "$ABS_BUILDDIR"
+ SRCDIR="$PWD"
+ cd "$ABS_BUILDDIR"
+
+ # Prevent using an empty module path
+ if test -n "$CMAKE_MODULE_PATH"; then
+ _MODULE_PATH="-DCMAKE_MODULE_PATH=\"$CMAKE_MODULE_PATH\""
+ fi
+ echo "$CMAKE $_MODULE_PATH $CMAKE_PARAMS $CMAKE_FLAGS \"$SRCDIR\""
+ eval $CMAKE "$_MODULE_PATH $CMAKE_PARAMS $CMAKE_FLAGS \"$SRCDIR\"" || exit 1
+}
+
+run_default_configure () {
+ # configure just forwards to cmake
+ run_default_cmake
+}
+
+run_default_make () {
+ test ! -d "$ABS_BUILDDIR" || cd "$ABS_BUILDDIR"
+ PARAMS="$CMD_FLAGS"
+ echo "build directory: $BUILDDIR"
+ # prepend '--' to separate cmake and make parameters
+ if ! $(echo "$PARAMS" | grep -q -- '--'); then
+ PARAMS="-- $PARAMS"
+ fi
+ echo $CMAKE --build . "$PARAMS"
+ eval $CMAKE --build . "$PARAMS"
+}
+
+run_default_all () {
+ for cmd in vcsetup cmake make; do
+ eval echo "--- calling $cmd for \$NAME_${module} ---"
+ load_opts $cmd
+ run_$cmd
+ done
+}
+
+run_default_svn () {
+ if test -d .svn; then
+ PARAMS="$CMD_FLAGS"
+ eval svn "$PARAMS"
+ fi
+}
+
+run_default_git () {
+ if test -e .git; then
+ PARAMS="$CMD_FLAGS"
+ eval git "$PARAMS"
+ fi
+}
+
+###############################################
+###
+### main
+###
+
+onfailure() {
+ echo "Execution of $(basename "$0") terminated due to errors!" >&2
+ exit 1
+}
+
+usage () {
+ (
+ echo "Usage: $(basename "$0") [OPTIONS] COMMANDS [COMMAND-OPTIONS]"
+ echo ""
+ echo " Execute COMMANDS for all Dune modules found. All entries in the"
+ echo " DUNE_CONTROL_PATH variable are scanned recursively for Dune modules."
+ echo " If DUNE_CONTROL_PATH is empty, the current directory is scanned."
+ echo " Dependencies are controlled by the $CONTROL files."
+ echo ""
+ echo "OPTIONS:"
+ echo " -h, --help show this help"
+ echo " --debug enable debug output of this script"
+ echo " --module=mod apply the actions on module mod"
+ echo " and all modules it depends on"
+ echo " --only=mod only apply the actions on module mod"
+ echo " and not the modules it depends on"
+ echo " --current only apply the actions on the current module,"
+ echo " i.e. the one whose source tree we are standing in,"
+ echo " and not the modules it depends on"
+ echo " --current-dep apply the actions on the current module,"
+ echo " and all modules it depends on"
+ echo " --resume resume a previous run (only consider the modules"
+ echo " not built successfully on the previous run)"
+ echo " --skipfirst skip the first module (use with --resume)"
+ echo " --skipversioncheck do not perform version checks when looking for other Dune modules"
+ echo " --opts=FILE load default options from FILE"
+ echo " --builddir=NAME make out-of-source builds in a subdir NAME."
+ echo " This directory is created inside each module."
+ echo " If NAME is an absolute path, the build directory "
+ echo " is set to NAME/module-name for each module."
+ echo " --[COMMAND]-opts=opts set options for COMMAND"
+ echo " (this is mainly useful for the 'all' COMMAND)"
+ echo "COMMANDS:"
+ echo " Colon-separated list of commands. Available commands are:"
+ printf " \`help'\tguess what :-)\n"
+ printf " \`print'\tprint the list of modules sorted after their dependencies\n"
+ printf " \`info'\tsame as \`print\', but including whether it is a dependency or suggestion\n"
+ for i in $COMMANDS; do
+ printf " \`$i'\t$(eval echo \$${i}_HELP)\n"
+ done
+ printf " \`export'\trun eval \`dunecontrol export\` to save the list of\n"
+ printf " \t\tdune.module files to the DUNE_CONTROL_PATH variable\n"
+ echo
+ ) >&2
+}
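+# Example invocations (illustrative, assuming the modules are found via
+# DUNE_CONTROL_PATH or below the current directory):
+#   dunecontrol --opts=release.opts --module=dune-grid all
+#   dunecontrol --current bexec make test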
+
+# create the module list
+create_module_list() {
+ # try to get the resume file name from the options
+ if test -z "$RESUME_FILE" && test -n "$DUNE_OPTS_FILE"; then
+ export RESUME_FILE="$(eval . $DUNE_OPTS_FILE; eval echo \$RESUME_FILE)"
+ fi
+
+ if test "$RESUME_FLAG" = "yes" ; then
+ if ! test -s "$RESUME_FILE" ; then
+ echo "Error: No previous run to resume. Please make sure that the RESUME_FILE"
+ echo " is the name of a writeable file (currently it is '$RESUME_FILE')"
+ exit 1
+ fi
+
+ export MODULES=
+ RESUME="`cat "$RESUME_FILE"`"
+ for a in $RESUME ; do
+ export NAME_`fix_variable_name $a`="$a"
+ fix_and_assign MODULE "$a"
+ export SEARCH_MODULES="$SEARCH_MODULES $MODULE"
+ export ONLY="$ONLY $MODULE"
+ done
+ fi
+
+ find_modules_in_path
+ if test "x$ONLY" != x; then
+ export MODULES="$ONLY"
+ elif test "x$SEARCH_MODULES" != "x"; then
+ sort_modules $SEARCH_MODULES
+ else
+ sort_modules $MODULES
+ fi
+
+ if test "x$REVERSE_FLAG" = "xyes"; then
+ export MODULES="$REVERSEMODULES"
+ fi
+
+ if test "x$SKIPFIRST" = "xyes" ; then
+ export MODULES=`echo $MODULES " " | cut '--delimiter= ' --fields=2-`
+ fi
+  # warn about superseded modules:
+ if test -n "$superseded_modules"; then
+    # sort the modules list and make it unique.
+ superseded_modules=$(echo $superseded_modules | tr ' ' '\n'| sort -u)
+ echo >&2
+    echo "The following local modules supersede the corresponding installed ones:" >&2
+ echo "$superseded_modules" >&2
+ echo >&2
+ fi
+}
+
+# print the module list
+print_module_list() {
+ DELIM=$1
+ shift
+ while test -n "$2"; do
+ echo -n "$(eval echo \$NAME_$1)$DELIM"
+ shift
+ done
+ echo -n "$(eval echo \$NAME_$1)"
+}
+
+trap onfailure EXIT
+
+# clear variables
+export SEARCH_MODULES=""
+export MODULES=""
+export ONLY=""
+export RESUME_FLAG=no
+export REVERSE_FLAG=no
+export SKIPFIRST=no
+
+# parse commandline parameters
+while test $# -gt 0; do
+ # get option
+ command=$1
+ option=$1
+
+ # get args
+ set +e
+ # stolen from configure...
+ # when no option is set, this returns an error code
+ arg=`expr "x$option" : 'x[^=]*=\(.*\)'`
+ set -e
+
+ # switch
+ case "$option" in
+ --opts=*)
+ if test "x$arg" = "x"; then
+ usage
+ echo "ERROR: Parameter for --opts is missing" >&2
+ echo >&2
+ exit 1;
+ fi
+ DUNE_OPTS_FILE=`canonicalname $arg`
+ if ! test -r "$DUNE_OPTS_FILE"; then
+ usage
+ echo "ERROR: could not read opts file \"$DUNE_OPTS_FILE\"" >&2
+ echo >&2
+ exit 1;
+ fi
+ ;;
+ --*-opts=*)
+ optcmd=`expr "x$option=" : 'x--\([^-]*\)-opts=.*'`
+ if is_command $optcmd; then
+ COMMAND=`echo $optcmd | tr '[:lower:]' '[:upper:]'`
+ export ${COMMAND}_FLAGS="$arg"
+ else
+ usage
+ echo "ERROR: unknown option \"$option\"" >&2
+ exit 1
+ fi
+ ;;
+ -h|--help)
+ command=help
+ break
+ ;;
+ -p|--print)
+ command=print
+ break
+ ;;
+ --module=*)
+ if test "x$arg" = "x"; then
+ usage
+ echo "ERROR: Parameter for --module is missing" >&2
+ echo >&2
+ exit 1;
+ fi
+ for a in `echo $arg | tr ',' ' '`; do
+ export NAME_`fix_variable_name $a`="$a"
+ fix_and_assign MODULE "$a"
+ export SEARCH_MODULES="$SEARCH_MODULES $MODULE"
+ done
+ ;;
+ --only=*)
+ if test "x$arg" = "x"; then
+ usage
+ echo "ERROR: Parameter for --only is missing" >&2
+ echo >&2
+ exit 1;
+ fi
+ for a in `echo $arg | tr ',' ' '`; do
+ export NAME_`fix_variable_name $a`="$a"
+ fix_and_assign MODULE "$a"
+ export SEARCH_MODULES="$SEARCH_MODULES $MODULE"
+ export ONLY="$ONLY $MODULE"
+ done
+ ;;
+ --builddir=*)
+ export DUNE_BUILDDIR=$arg
+ ;;
+ --no-builddir)
+ export DUNE_BUILDDIR=""
+ ;;
+ --skipversioncheck)
+ export SKIPVERSIONCHECK=yes
+ ;;
+ --current)
+ while ! test -f $CONTROL; do
+ cd ..
+ if test "$OLDPWD" = "$PWD"; then
+ echo "You are not inside the source tree of a DUNE module." >&2
+ exit -1
+ fi
+ done;
+ parse_control $PWD/$CONTROL
+ fix_and_assign MODULE "$module"
+ export SEARCH_MODULES="$SEARCH_MODULES $MODULE"
+ export ONLY="$ONLY $MODULE"
+ ;;
+ --current-dep)
+ while ! test -f $CONTROL; do
+ cd ..
+ if test "$OLDPWD" = "$PWD"; then
+ echo "You are not inside the source tree of a DUNE module." >&2
+ exit -1
+ fi
+ done;
+ parse_control $PWD/$CONTROL
+ fix_and_assign MODULE "$module"
+ export SEARCH_MODULES="$SEARCH_MODULES $MODULE"
+ ;;
+ --resume)
+ export RESUME_FLAG="yes"
+ ;;
+ --reverse)
+ export REVERSE_FLAG="yes"
+ ;;
+ --skipfirst)
+ export SKIPFIRST=yes
+ ;;
+ --debug) true ;; # ignore this option, it is handled right at the beginning
+ --*)
+ usage
+ echo "ERROR: Unknown option \`$option'" >&2
+ echo >&2
+ exit 1
+ ;;
+ *)
+ break
+ ;;
+ esac
+
+ shift
+done
+
+extract_multiarch
+
+# create PKG_CONFIG_PATH for installed dune modules
+for i in $MULTIARCH_LIBDIR lib64 lib32 lib; do
+ if test -d "$PREFIX_DIR/$i/pkgconfig"; then
+ export PKG_CONFIG_PATH="$PKG_CONFIG_PATH:$PREFIX_DIR/$i/pkgconfig"
+ fi
+done
+
+# we assume there should be a command...
+if test "x$command" = "x"; then
+ usage
+ exit 1
+fi
+
+case "$command" in
+ print)
+ create_module_list
+ eval "print_module_list ' ' $MODULES"
+ echo >&2
+ ;;
+ info)
+ create_module_list
+ echo $SORTEDMODULES_INFO
+ ;;
+ export)
+ create_module_list
+ DUNE_CONTROL_PATH=""
+ for mod in $MODULES; do
+ path="$(eval echo \$PATH_$mod)"
+ name=$(eval echo \$NAME_$mod)
+ if test -f "$path/dune.module"; then
+ export DUNE_CONTROL_PATH="$DUNE_CONTROL_PATH:$path/dune.module"
+ else
+ if test -f "$path/lib/dunecontrol/$name/dune.module"; then
+ export DUNE_CONTROL_PATH="$DUNE_CONTROL_PATH:$path/lib/dunecontrol/$name/dune.module"
+ else
+ echo "ERROR: while creating list of dune.module files" >&2
+ echo " couldn't find dune.module file for $name in $path" >&2
+ echo >&2
+ exit 1
+ fi
+ fi
+ done
+ echo export DUNE_CONTROL_PATH=$(echo $DUNE_CONTROL_PATH | $SED -e 's/^://')
+ ;;
+ printdeps)
+ find_modules_in_path
+ if test "x$SEARCH_MODULES" == "x"; then
+ echo "ERROR: printdeps requires an explicit --module=... parameter" >&2
+ exit 1
+ fi
+ mainmod=`echo $SEARCH_MODULES`
+ name=`eval echo \\${NAME_$mainmod}`
+ echo "dependencies for $name"
+ ### DEPENDENCIES
+ sort_modules $mainmod
+ for mod in $SORTEDMODULES_DEPS; do
+ eval echo "\" \$NAME_${mod} (required)\""
+ done
+ for mod in $SORTEDMODULES_SUGS; do
+ eval echo "\" \$NAME_${mod} (suggested)\""
+ done
+ ;;
+ unexport)
+ echo export DUNE_CONTROL_PATH=""
+ ;;
+ help)
+ usage
+ ;;
+ *)
+ set -e
+ # skip version check if command is in according list
+ if grep -q "$1" <<<"$COMMANDSTOSKIPVERSIONCHECK" ; then
+ export SKIPVERSIONCHECK=yes;
+ fi
+ check_commands "$@"
+ create_module_list
+ NAMES=""
+ BUILDMODULES=""
+ for mod in $MODULES; do
+ if test "$(eval echo \$INST_$mod)" != "yes"; then
+ NAMES="$NAMES$(eval echo \$NAME_$mod) "
+ BUILDMODULES="$BUILDMODULES$mod "
+ fi
+ done
+ echo "--- going to build $NAMES ---"
+ if test -n "$RESUME_FILE"; then
+ # write all modules to the resume file
+ for mod in $MODULES ; do
+ echo "$mod"
+ done > "$RESUME_FILE"
+ fi
+
+ for mod in $BUILDMODULES; do
+ build_module "$mod" "$@"
+
+ if test -n "$RESUME_FILE"; then
+ # remove the current module from the resume file
+ modules_togo=`cat "$RESUME_FILE"`
+ for mod_togo in $modules_togo ; do
+ if test "$mod_togo" != "$mod" ; then
+ echo "$mod_togo"
+ fi
+ done > "$RESUME_FILE"
+ fi
+ done
+ echo "--- done ---"
+ ;;
+esac
+
+trap - EXIT
--- /dev/null
+#!/usr/bin/env python3
+
+import sys, os, io, getopt, re, shutil
+import importlib, subprocess
+import email.utils
+import pkg_resources
+from datetime import date
+
+# Make sure that 'metaData' is taken from the current `dune-common` folder
+# and not from some installed version, which might (by mistake) differ from
+# the one being packaged. The path to `packagemetadata.py` needs to be added
+# to the Python path (to make the import below work) and to the environment
+# so that a later call to `python setup.py` also works.
+here = os.path.dirname(os.path.abspath(__file__))
+mods = os.path.join(here, "..", "python", "dune")
+sys.path.append(mods)
+pythonpath = mods + ":" + os.environ.get('PYTHONPATH','.')
+os.environ['PYTHONPATH'] = pythonpath
+from packagemetadata import metaData
+
+def main(argv):
+
+ repositories = ["gitlab", "testpypi", "pypi"]
+ def usage():
+ return 'usage: dunepackaging.py [--upload <'+"|".join(repositories)+'> | -c | --clean | --version <version> | --onlysdist | --bdist_conda]'
+
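+    # Typical invocations (illustrative):
+    #   dunepackaging.py --onlysdist          # only build the source distribution
+    #   dunepackaging.py --upload testpypi    # build and upload to TestPyPI
+    #   dunepackaging.py --clean              # remove generated files again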
+ try:
+ opts, args = getopt.getopt(argv, "hc", ["upload=", "clean", "version=", "onlysdist", "bdist_conda"])
+ except getopt.GetoptError:
+ print(usage())
+ sys.exit(2)
+
+ upload = False
+ repository = "gitlab"
+ clean = False
+ version = None
+ onlysdist = False
+ bdistconda = False
+ for opt, arg in opts:
+ if opt == '-h':
+ print(usage())
+ sys.exit(2)
+ elif opt in ("--upload"):
+ upload = True
+ if arg != '':
+ repository = arg
+ if repository not in repositories:
+ print("Specified repository must be one of: " + " ".join(repositories))
+ sys.exit(2)
+ elif opt in ("-c", "--clean"):
+ clean = True
+ elif opt in ("--version"):
+ version = arg
+ elif opt in ("--onlysdist"):
+ onlysdist = True
+ elif opt in ("--bdist_conda"):
+ onlysdist = True
+ bdistconda = True
+
+ # Remove generated files
+ def removeFiles():
+ import glob
+ files = ['MANIFEST', 'dist', '_skbuild', '__pycache__']
+ print("Remove generated files: " + ", ".join(files))
+ remove = ['rm', '-rf'] + files
+ subprocess.call(remove)
+ # checkout setup.py and pyproject.toml
+ checkout = ['git', 'checkout', 'setup.py', 'pyproject.toml']
+ subprocess.call(checkout)
+
+ if clean:
+ removeFiles()
+ sys.exit(0)
+
+ data, cmake_flags = metaData(version, dependencyCheck=False)
+
+ if version is None:
+ version = data.version
+
+ # Generate setup.py
+ print("Generate setup.py")
+ f = open("setup.py", "w")
+ if data.name == 'dune-common':
+ f.write("import os, sys\n")
+ f.write("here = os.path.dirname(os.path.abspath(__file__))\n")
+ f.write("mods = os.path.join(here, \"python\", \"dune\")\n")
+ f.write("sys.path.append(mods)\n\n")
+ f.write("try:\n")
+ f.write(" from dune.packagemetadata import metaData\n")
+ f.write("except ImportError:\n")
+ f.write(" from packagemetadata import metaData\n")
+ f.write("from skbuild import setup\n")
+ f.write("setup(**metaData('"+version+"')[1])\n")
+ f.close()
+
+ # Generate pyproject.toml
+ print("Generate pyproject.toml")
+ f = open("pyproject.toml", "w")
+ requires = ["pip", "setuptools", "wheel", "scikit-build", "cmake", "ninja", "requests"]
+ requires += [r for r in data.asPythonRequirementString(data.depends + data.python_requires) if r not in requires]
+ f.write("[build-system]\n")
+ f.write("requires = "+requires.__str__()+"\n")
+ f.write("build-backend = 'setuptools.build_meta'\n")
+ f.close()
+
+ # Create source distribution and upload to repository
+ python = sys.executable
+ if upload or onlysdist:
+ print("Remove dist")
+ remove = ['rm', '-rf', 'dist']
+ subprocess.call(remove)
+
+ # check if we have scikit-build
+ import pkg_resources
+ installed = {pkg.key for pkg in pkg_resources.working_set}
+ if not 'scikit-build' in installed:
+ print("Please install the pip package 'scikit-build' to build the source distribution.")
+ sys.exit(2)
+
+ # append hash of current git commit to README
+ shutil.copy('README.md', 'tmp_README.md')
+ githash = ['git', 'rev-parse', 'HEAD']
+ hash = subprocess.check_output(githash, encoding='UTF-8')
+ with open("README.md", "a") as f:
+ f.write("\n\ngit-" + hash)
+
+ print("Create source distribution")
+ # make sure setup.py/pyproject.toml are tracked by git so that
+ # they get added to the package by scikit
+ gitadd = ['git', 'add', 'setup.py', 'pyproject.toml']
+ subprocess.call(gitadd)
+ # run sdist
+ build = [python, 'setup.py', 'sdist']
+ subprocess.call(build, stdout=subprocess.DEVNULL)
+ # undo the above git add
+ gitreset = ['git', 'reset', 'setup.py', 'pyproject.toml']
+ subprocess.call(gitreset)
+
+ # restore README.md
+ shutil.move('tmp_README.md', 'README.md')
+
+ if not onlysdist:
+ # check if we have twine
+ import pkg_resources
+ installed = {pkg.key for pkg in pkg_resources.working_set}
+ if not 'twine' in installed:
+ print("Please install the pip package 'twine' to upload the source distribution.")
+ sys.exit(2)
+
+ twine = [python, '-m', 'twine', 'upload']
+ twine += ['--repository', repository]
+ twine += ['dist/*']
+ subprocess.call(twine)
+
+ removeFiles()
+
+ # create conda package meta.yaml (experimental)
+ if bdistconda:
+ import hashlib
+ remove = ['rm', '-rf', 'dist/'+data.name]
+ subprocess.call(remove)
+ mkdir = ['mkdir', 'dist/'+data.name ]
+ subprocess.call(mkdir)
+
+ print("Create bdist_conda (experimental)")
+ distfile = 'dist/'+data.name+'-'+version+'.tar.gz'
+ datahash = ''
+ with open(distfile, "rb") as include:
+ source = include.read()
+ datahash = hashlib.sha256( source ).hexdigest()
+
+ print("Generate ",'dist/'+data.name+'/meta.yaml')
+ f = open('dist/'+data.name+'/meta.yaml', "w")
+ f.write('{% set name = "' + data.name + '" %}\n')
+ f.write('{% set version = "' + version + '" %}\n')
+ f.write('{% set hash = "' + datahash + '" %}\n\n')
+ f.write('package:\n')
+ f.write(' name: "{{ name|lower }}"\n')
+ f.write(' version: "{{ version }}"\n\n')
+ f.write('source:\n')
+ f.write(' path: ../{{ name }}-{{ version }}/\n')
+ f.write(' sha256: {{ hash }}\n\n')
+ f.write('build:\n')
+ f.write(' number: 1\n')
+ if 'TMPDIR' in os.environ:
+ f.write(' script_env:\n')
+ f.write(' - TMPDIR=' + os.environ['TMPDIR'] +'\n')
+ f.write(' script: "{{ PYTHON }} -m pip install . --no-deps --ignore-installed -vv "\n\n')
+ f.write('requirements:\n')
+
+ requirements = ['pip', 'python', 'mkl', 'tbb', 'intel-openmp',
+ 'libgcc-ng', 'libstdcxx-ng', 'gmp', 'scikit-build',
+ 'mpi4py', 'matplotlib', 'numpy', 'scipy', 'ufl']
+
+ for dep in data.depends:
+ requirements += [dep[0]]
+
+ f.write(' host:\n')
+ for dep in requirements:
+ f.write(' - ' + dep + '\n')
+
+ f.write('\n')
+ f.write(' run:\n')
+ for dep in requirements:
+ f.write(' - ' + dep + '\n')
+
+ f.write('\n')
+ f.write('test:\n')
+ f.write(' imports:\n')
+ f.write(' - ' + data.name.replace('-','.') + '\n\n')
+ f.write('about:\n')
+ f.write(' home: '+data.url+'\n')
+ f.write(' license: GPLv2 with linking exception.\n')
+ f.write(' license_family: GPL\n')
+ f.write(' summary: '+data.description+'\n')
+ f.close()
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
--- /dev/null
+#!/usr/bin/env bash
+# -*- indent-tabs-mode: nil; sh-basic-offset: 2; sh-indentation: 2 -*-
+# vi: set et sw=2:
+
+# The sh-indentation in the emacs mode-line is needed for emacs <26, see
+# https://debbugs.gnu.org/21751
+
+#
+# TODO:
+#
+# * Check module names entered as dependencies.
+
+set -e
+
+canonicalname(){
+ if test $# -ne 1; then
+ echo Usage: canonicalname path >&2
+ return 1
+ fi
+ file="`eval echo $1`" # expand ~
+ if test ! -e "$file"; then
+ echo $file: file not found >&2
+ return 1
+ fi
+ # if this is a symlink, then follow the symlink
+ if test -L "$file"; then
+ fdir="`dirname \"$file\"`"
+ flink="`readlink \"$file\"`"
+ if test -e "$flink"; then
+ # these are absolute links, or links in the CWD
+ canonicalname "$flink"
+ else
+ canonicalname "$fdir/$flink"
+ fi
+ else
+ # if this is a file, then remember the filename and
+ # canonicalize the directory name
+ if test -f "$file"; then
+ fdir="`dirname \"$file\"`"
+ fname="`basename \"$file\"`"
+ fdir="`canonicalname \"$fdir\"`"
+ echo "$fdir/$fname"
+ fi
+ # if this is a directory, then create an absolute
+ # directory name and we are done
+ if test -d "$file"; then
+ (cd "$file"; pwd)
+ fi
+ fi
+}
+
+canonicalpath(){
+ if test $# -ne 1; then
+ echo Usage: canonicalpath path >&2
+ return 1
+ fi
+ dirname "$(canonicalname "$1")"
+}
+
+pkg_config_dependencies(){
+ if test $# -ne 1; then
+ echo Usage: pkg_config_dependencies name >&2
+ return 1
+ fi
+ name="$1"
+ depends="`pkg-config --variable=DEPENDENCIES $name| sed -e 's/,/ /g'`"
+ for pkg in $depends; do
+ depends="$depends `pkg_config_dependencies $pkg`"
+ done
+ echo $depends
+}
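+# Example (illustrative): "pkg_config_dependencies dune-geometry" prints the
+# DEPENDENCIES entries collected recursively from the installed .pc files,
+# e.g. "dune-common".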
+
+# modulesexist DEPS MODULES
+#
+# DEPS is a space-separated list of modules the new module should depend on
+# MODULES is a space-separated list of modules that are known to be present
+#
+# Each name in DEPS is checked to see whether it is present in MODULES. If
+# not, it is checked whether it can be found with pkg-config. If it still
+# cannot be found, an error message is issued and modulesexist returns with
+# an exit status indicating failure.
+modulesexist(){
+ local status dep found
+ status=0
+
+ for dep in $1; do
+ found=false
+ if [[ " $2 " == *" $dep "* ]]; then
+ found=true
+ fi
+ # If module not found in list, try pkg-config
+ if ! $found && pkg-config $dep &> /dev/null; then
+ found=true
+ fi
+ if ! $found; then
+ echo "ERROR:">&2
+ echo "Module with name $dep was not found" >&2
+ echo "Did you forget to specify its location" >&2
+ echo "in the DUNE_CONTROL_PATH variable?">&2
+ echo >&2
+ status=1
+ fi
+ done
+
+ return $status
+}
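+# Example (illustrative):
+#   modulesexist "dune-common dune-istl" "dune-common dune-geometry" \
+#     || echo "at least one dependency is missing"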
+
+make_unique(){
+ if [ "$#" = "1" ]; then
+ # take first word
+ for exclude_word in $1; do
+ break;
+ done
+ make_unique $exclude_word "$1" 0
+ else
+ local exclude_word="$1"
+ local words="$2"
+ local pos="$3"
+ local length=0
+ local i=0
+ local new_words=""
+ local cur=0
+ for word in $words; do
+ if [ $i -le $pos ]; then
+ i=$((i+1))
+ length=$((length+1))
+ new_words="$new_words $word"
+ continue
+ fi
+ if [ "$word" != "$exclude_word" ]; then
+ new_words="$new_words $word"
+ if [ "$((length-1))" = "$pos" ]; then
+ next_word="$word"
+ fi
+ length=$((length+1))
+ fi
+ done
+ if [ "$pos" -lt "$length" ]; then
+ # process next word
+ make_unique "$next_word" "$new_words" $((pos+1))
+ else
+ export UNIQUE_WORDS="$new_words"
+ fi
+ fi
+}
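+# Example (illustrative): make_unique "[dune-common] [dune-grid] [dune-common]"
+# removes the duplicate and sets UNIQUE_WORDS accordingly (the first occurrence
+# of each word is kept, order is preserved).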
+
+echo
+echo == Dune project/module generator ==
+echo
+echo duneproject will assist you in the creation of a new Dune application.
+echo During this process a new directory with the name of your project will be
+echo created. This directory will hold all configuration and build files and a
+echo simple example application.
+echo
+
+################## FIND AVAILABLE MODULES ##################
+
+. "$(canonicalpath $0)/../lib/dunemodules.lib"
+
+export PREFIX_DIR="`canonicalpath "$0"`/.."
+
+extract_multiarch_pkg_config_path
+
+# search for modules, both installed and src modules
+find_modules_in_path
+
+# sort modules to remove duplicates
+sort_modules $FOUND_MODULES
+FOUND_MODULES=$MODULES
+
+# get the real module names
+MODULES=""
+for i in $FOUND_MODULES; do
+ mod=$(eval echo \$NAME_$i)
+ MODULES="$MODULES$mod "
+done
+
+if [ "$MODULES" = "" ]; then
+ echo "ERROR:">&2
+ echo " No dune modules were found!">&2
+ echo " Did you forget to specify the places where ">&2
+ echo " you installed your modules in the ">&2
+ echo " DUNE_CONTROL_PATH environment variable">&2
+ echo " and adjusted the PKG_CONFIG_PATH environment">&2
+ echo " accordingly?" >&2
+ exit 1;
+fi
+
+################## READ CMDLINE OPTIONS ##########
+PROJECT="$1"
+DEPENDENCIES="$2"
+VERSION="$3"
+MAINTAINER="$4"
+
+################## READ OPTIONS ##################
+
+while [ "$DATACORRECT" != "y" -a "$DATACORRECT" != "Y" ]; do
+
+ while [ -z $PROJECT ]; do
+ read -p "1) Name of your new Project? (e.g.: dune-grid): " PROJECT
+ if echo "$MODULES" | grep -q ^$PROJECT$; then
+ read -p " A module named $PROJECT already exists. Continue anyway? [y/N] " CONT
+      if test x$CONT != xy -a x$CONT != xY; then
+ PROJECT=""
+ fi
+ elif echo "$PROJECT" | grep -q "\."; then
+      echo "The name contains a dot (.), which is not allowed."
+ PROJECT=""
+ fi
+ done
+ MODULE="$PROJECT"
+
+ DEPOK=1
+
+ while [ "$DEPOK" != 0 ]; do
+ echo "2) Which modules should this module depend on?"
+ echo " The following modules have been found:"
+ echo " $MODULES"
+ # for i in $MODULES; do echo -n " $i"; done
+ # echo ""
+ while [ -z "$DEPENDENCIES" ]; do
+ read -p " Enter space-separated list: " DEPENDENCIES
+ done
+ set +e
+ modulesexist "$DEPENDENCIES" "$MODULES"
+ DEPOK=$?
+ set -e
+ if [ "$DEPOK" != 0 ]; then
+ DEPENDENCIES=""
+ fi
+ done
+
+ while [ -z $VERSION ]; do
+ read -p "3) Project/Module version? " VERSION
+ done
+ while [ -z "$MAINTAINER" ]; do
+ read -p "4) Maintainer's email address? " MAINTAINER
+ done
+
+ echo
+ echo "creating Project \"$PROJECT\", version $VERSION "
+ echo "which depends on \"$DEPENDENCIES\""
+ echo "with maintainer \"$MAINTAINER\""
+  read -p "Is this information correct? [y/N] " DATACORRECT
+
+ # reset data if necessary
+ if [ "$DATACORRECT" != "y" -a "$DATACORRECT" != "Y" ]; then
+ PROJECT=""
+ DEPENDENCIES=""
+ VERSION=""
+ MAINTAINER=""
+ fi
+
+done
+
+
+
+echo
+echo "A sample code $MODULE.cc is generated in the \"$PROJECT\" directory."
+echo "Look at the README and dune.module files there."
+echo "Now you can run the dunecontrol script which will setup the new module."
+echo "Sometimes you may have to tweak CMakeLists.txt a bit."
+
+if test -d $PROJECT; then
+ echo WARNING:
+ echo "A directory with the name $PROJECT already exists."
+ echo "Do you want to continue anyway?"
+ read -p "Type Y to overwrite the old directory, N to abort. [y/N] " DELETE
+ if test x$DELETE != xy -a x$DELETE != xY; then
+ echo Abort...
+ exit 1
+ fi
+ rm -rf "$PROJECT"
+fi
+mkdir "$PROJECT"
+
+################## dune.module ##################
+cat > "$PROJECT/dune.module" <<C_DELIM
+################################
+# Dune module information file #
+################################
+
+# Name of the module
+Module: $MODULE
+Version: $VERSION
+Maintainer: $MAINTAINER
+# Required build dependencies
+Depends: $DEPENDENCIES
+# Optional build dependencies
+#Suggests:
+C_DELIM
+
+## Create the parameters passed to DUNE_CHECK_ALL
+
+# save module list of dunemodules.inc
+save_MODULES=$MODULES
+for name in $DEPENDENCIES; do
+ mod="`fix_variable_name $name`"
+ if test "x$(eval echo \$HAVE_$mod)" != "x"; then
+ # found via dunemodules.inc
+ sort_modules "$mod"
+ for mod in $MODULES; do
+ M_DEPS="$M_DEPS $(eval echo \$NAME_$mod)"
+ done
+ MODULES=$save_MODULES
+ else
+ # found via pkg-config
+ M_DEPS="`pkg_config_dependencies $name` $name"
+ fi
+ for dep in $M_DEPS; do
+ CHECK="$CHECK [$dep]"
+ done
+done
+set +x
+make_unique "$CHECK"
+
+# insert , between modules
+j=0
+for dep in $UNIQUE_WORDS; do
+ if [ "$j" = "0" ]; then
+ CHECK="$dep"
+ j=1
+ else
+ CHECK="$CHECK, $dep"
+ fi
+done
+
+echo "------------------------------------------"
+echo "writing initial files:"
+
+# complete module name with _ instead of - to not confuse automake
+fix_and_assign CMODULE $MODULE
+# module name without prefix "dune-"
+NAME=`echo $PROJECT | sed -e 's/dune[_-]//'`
+# $NAME with _ instead of - to not confuse automake
+NAME_=`echo $NAME | tr '-' '_'`
+# module name in uppercase with _ instead of -
+UNAME=`echo $PROJECT | tr '-' '_' | sed 's/\(.*\)/\U\1/'`
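+# Example (illustrative): for PROJECT=dune-foo-bar this yields
+#   NAME=foo-bar, NAME_=foo_bar, UNAME=DUNE_FOO_BAR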
+
+################## README ##################
+echo "- $PROJECT/README"
+cat > "$PROJECT/README" <<R_DELIM
+Preparing the Sources
+=========================
+
+In addition to the software mentioned in README, you'll need the
+following programs installed on your system:
+
+ cmake >= 3.13
+
+Getting started
+---------------
+
+If these preliminaries are met, you should run
+
+ dunecontrol all
+
+which will find all installed dune modules as well as all dune modules
+(not installed) whose sources reside in a subdirectory of the current
+directory. Note that if dune is not installed properly you will either
+have to add the directory where the dunecontrol script resides (probably
+./dune-common/bin) to your path or specify the relative path of the script.
+
+Most probably you'll have to provide additional information to dunecontrol
+(e.g. compilers, configure options) and/or make options.
+
+The most convenient way is to use options files in this case. These files
+can define variables such as:
+
+CMAKE_FLAGS flags passed to cmake (during configure)
+
+An example options file might look like this:
+
+# use these options for configure and make if no other options are given
+CMAKE_FLAGS=" \\
+-DCMAKE_CXX_COMPILER=g++-5 \\
+-DCMAKE_CXX_FLAGS='-Wall -pedantic' \\
+-DCMAKE_INSTALL_PREFIX=/install/path" #Force g++-5 and set compiler flags
+
+If you save this information into example.opts, you can pass the opts file to
+dunecontrol via the --opts option, e.g.
+
+ dunecontrol --opts=example.opts all
+
+More info
+---------
+
+See
+
+ dunecontrol --help
+
+for further options.
+
+
+The full build system is described in the dune-common/doc/buildsystem (Git version) or under share/doc/dune-common/buildsystem if you installed DUNE!
+R_DELIM
+
+################## CMakeLists.txt ##################
+echo "- $PROJECT/CMakeLists.txt"
+cat> "$PROJECT/CMakeLists.txt" << M_DELIM
+cmake_minimum_required(VERSION 3.13)
+project($PROJECT CXX)
+
+if(NOT (dune-common_DIR OR dune-common_ROOT OR
+ "\${CMAKE_PREFIX_PATH}" MATCHES ".*dune-common.*"))
+ string(REPLACE \${PROJECT_NAME} dune-common dune-common_DIR
+ \${PROJECT_BINARY_DIR})
+endif()
+
+#find dune-common and set the module path
+find_package(dune-common REQUIRED)
+list(APPEND CMAKE_MODULE_PATH "\${PROJECT_SOURCE_DIR}/cmake/modules"
+ \${dune-common_MODULE_PATH})
+
+#include the dune macros
+include(DuneMacros)
+
+# start a dune project with information from dune.module
+dune_project()
+
+dune_enable_all_packages()
+
+add_subdirectory(src)
+add_subdirectory(dune)
+add_subdirectory(doc)
+add_subdirectory(cmake/modules)
+
+# finalize the dune project, e.g. generating config.h etc.
+finalize_dune_project(GENERATE_CONFIG_H_CMAKE)
+M_DELIM
+
+################## PROJECT.PC.IN ##################
+echo "- $PROJECT/$MODULE.pc.in"
+cat> "$PROJECT/$MODULE.pc.in" << CC_DELIM
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+CXX=@CXX@
+CC=@CC@
+DEPENDENCIES=@REQUIRES@
+
+Name: @PACKAGE_NAME@
+Version: @VERSION@
+Description: $MODULE module
+URL: http://dune-project.org/
+Requires: ${DEPENDENCIES}
+Libs: -L\${libdir}
+Cflags: -I\${includedir}
+CC_DELIM
+echo " Please remember to update your $PROJECT/$MODULE.pc.in,"
+echo " Description and URL are missing right now."
+
+################# config.h.cmake #####################
+
+echo "- $PROJECT/config.h.cmake"
+cat> "$PROJECT/config.h.cmake" <<EOF
+/* begin $PROJECT
+ put the definitions for config.h specific to
+ your project here. Everything above will be
+ overwritten
+*/
+
+/* begin private */
+/* Name of package */
+#define PACKAGE "@DUNE_MOD_NAME@"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "@DUNE_MAINTAINER@"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "@DUNE_MOD_NAME@"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "@DUNE_MOD_NAME@ @DUNE_MOD_VERSION@"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "@DUNE_MOD_NAME@"
+
+/* Define to the home page for this package. */
+#define PACKAGE_URL "@DUNE_MOD_URL@"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "@DUNE_MOD_VERSION@"
+
+/* end private */
+
+/* Define to the version of $PROJECT */
+#define ${UNAME}_VERSION "@${UNAME}_VERSION@"
+
+/* Define to the major version of $PROJECT */
+#define ${UNAME}_VERSION_MAJOR @${UNAME}_VERSION_MAJOR@
+
+/* Define to the minor version of $PROJECT */
+#define ${UNAME}_VERSION_MINOR @${UNAME}_VERSION_MINOR@
+
+/* Define to the revision of $PROJECT */
+#define ${UNAME}_VERSION_REVISION @${UNAME}_VERSION_REVISION@
+
+/* end $PROJECT
+ Everything below here will be overwritten
+*/
+EOF
+## done
+
+###############################################################
+################## The source subdirectory ####################
+###############################################################
+
+mkdir "$PROJECT/src"
+
+################## src/CMakeLists.txt ##################
+
+echo "- $PROJECT/src/CMakeLists.txt"
+cat> "$PROJECT/src/CMakeLists.txt" << M_DELIM
+add_executable("${MODULE}" ${MODULE}.cc)
+target_link_dune_default_libraries("${MODULE}")
+M_DELIM
+
+################## PROJECT.CC ##################
+echo "- $PROJECT/src/$MODULE.cc"
+cat> "$PROJECT/src/$MODULE.cc" << CC_DELIM
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include <iostream>
+#include <dune/common/parallel/mpihelper.hh> // An initializer of MPI
+#include <dune/common/exceptions.hh> // We use exceptions
+
+int main(int argc, char** argv)
+{
+ try{
+ // Maybe initialize MPI
+ Dune::MPIHelper& helper = Dune::MPIHelper::instance(argc, argv);
+ std::cout << "Hello World! This is ${PROJECT}." << std::endl;
+ if(Dune::MPIHelper::isFake)
+ std::cout<< "This is a sequential program." << std::endl;
+ else
+ std::cout<<"I am rank "<<helper.rank()<<" of "<<helper.size()
+ <<" processes!"<<std::endl;
+ return 0;
+ }
+ catch (Dune::Exception &e){
+ std::cerr << "Dune reported error: " << e << std::endl;
+ }
+ catch (...){
+ std::cerr << "Unknown exception thrown!" << std::endl;
+ }
+}
+CC_DELIM
+
+################################################################
+################## The headers subdirectory ####################
+################################################################
+
+echo "- $PROJECT/dune/$NAME"
+mkdir "$PROJECT/dune"
+mkdir "$PROJECT/dune/$NAME"
+
+################## dune/CMakeLists.txt #################
+echo "- $PROJECT/dune/CMakeLists.txt"
+cat> $PROJECT/dune/CMakeLists.txt <<EOF
+add_subdirectory($NAME)
+EOF
+
+################## dune/$NAME/CMakeLists.txt ###########
+echo "- $PROJECT/dune/$NAME/CMakeLists.txt"
+cat> $PROJECT/dune/$NAME/CMakeLists.txt <<EOF
+#install headers
+install(FILES ${NAME}.hh DESTINATION \${CMAKE_INSTALL_INCLUDEDIR}/dune/$NAME)
+EOF
+
+################## dune/$NAME/$NAME.hh #################
+echo "- $PROJECT/dune/$NAME/$NAME.hh"
+cat> $PROJECT/dune/$NAME/$NAME.hh <<EOF
+#ifndef ${UNAME}_HH
+#define ${UNAME}_HH
+
+// add your classes here
+
+#endif // ${UNAME}_HH
+EOF
+
+
+###############################################################
+################## The doc subdirectory #######################
+###############################################################
+
+mkdir "$PROJECT/doc"
+
+################## doc/CMakeLists.txt #################
+echo "- $PROJECT/doc/CMakeLists.txt"
+cat> "$PROJECT/doc/CMakeLists.txt" << CC_DELIM
+add_subdirectory("doxygen")
+CC_DELIM
+
+###############################################################
+############### The doc/doxygen subdirectory ##################
+###############################################################
+
+mkdir "$PROJECT/doc/doxygen"
+
+#################### basic Doxylocal ##########################
+
+echo "- $PROJECT/doc/doxygen/Doxylocal"
+if [ "x`which doxygen`" == "x" ]; then
+ echo "Doxygen is not installed! Your documentation will not work without it."
+fi
+# Where to search and which files to use
+cat> "$PROJECT/doc/doxygen/Doxylocal" << CC_DELIM
+# This file contains local changes to the doxygen configuration
+# please use '+=' to add files/directories to the lists
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT += @top_srcdir@/dune/
+# see e.g. dune-grid for examples of a mainpage and modules
+# INPUT += @srcdir@/mainpage \\
+# @srcdir@/modules
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+# EXCLUDE += @top_srcdir@/dune/$NAME/test
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+# EXAMPLE_PATH += @top_srcdir@/src
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+# IMAGE_PATH += @top_srcdir@/dune/$NAME/pics
+CC_DELIM
+
+################# doc/doxygen/CMakeLists.txt #####################
+
+echo "- $PROJECT/doc/doxygen/CMakeLists.txt"
+cat> "$PROJECT/doc/doxygen/CMakeLists.txt" << CC_DELIM
+# shortcut for creating the Doxyfile.in and Doxyfile
+add_doxygen_target()
+CC_DELIM
+
+#########################################################
+############### The cmake subdirectory ##################
+#########################################################
+
+mkdir "$PROJECT/cmake"
+
+#########################################################
+############### The cmake/modules subdirectory ##########
+#########################################################
+
+mkdir "$PROJECT/cmake/modules"
+
+macroname=""
+for i in $(echo $PROJECT| sed 's/-/ /g'); do
+ firstchar=$(echo $i | sed 's/\(.\).*/\1/')
+ macroname=$macroname$(echo $firstchar | tr '[a-z]' '[A-Z]')$(echo $i | sed 's/.\(.*\)/\1/')
+done
+macroname="$macroname""Macros.cmake"
+
+################# cmake/modules/CMakeLists.txt #####################
+
+echo "- $PROJECT/cmake/modules/CMakeLists.txt"
+cat> "$PROJECT/cmake/modules/CMakeLists.txt" <<EOF
+set(modules "$macroname")
+
+install(FILES \${modules} DESTINATION \${DUNE_INSTALL_MODULEDIR})
+EOF
+
+################# cmake/modules/$macroname #####################
+
+echo "- $PROJECT/cmake/modules/$macroname"
+cat> "$PROJECT/cmake/modules/$macroname" <<EOF
+# File for module specific CMake tests.
+EOF
+
+
+################# done #####################
+
+echo
+echo "done."
+echo "------------------------------------------"
+echo "For further details read the Dune build system documentation at"
+echo "https://www.dune-project.org/buildsystem/"
--- /dev/null
+try:
+ from dune.common.module import resolve_dependencies, resolve_order, select_modules
+
+except ImportError:
+ import os
+ here = os.path.dirname(os.path.abspath(__file__))
+ mods = os.path.join(os.path.dirname(here), "python", "dune", "common")
+ if os.path.exists(os.path.join(mods, "module.py")):
+ import sys
+ sys.path.append(mods)
+ from module import resolve_dependencies, resolve_order, select_modules
+ else:
+ raise
+
+print("Found Modules:")
+print("--------------")
+modules, _ = select_modules()
+for description in modules.values():
+ print(repr(description))
+ print()
+
+print()
+print("Resolved Dependencies:")
+print("----------------------")
+deps = resolve_dependencies(modules)
+for mod_name, mod_deps in deps.items():
+ print(mod_name + ": " + " ".join(mod_deps))
+
+print()
+print("Build Order:")
+print("------------")
+print(" ".join(resolve_order(deps)))
--- /dev/null
+#!/usr/bin/env python
+
+import glob, os, sys, re, fileinput
+
+import dune.common.module
+dune_py_dir = dune.common.module.get_dune_py_dir()
+generated_dir = os.path.join(dune_py_dir, 'python', 'dune', 'generated')
+
+from argparse import ArgumentParser
+parser = ArgumentParser(description='Remove generated modules from dune-py')
+parser.add_argument('-a', '--all', help='remove all modules', action='store_true', default=False)
+parser.add_argument('modules', metavar='M', nargs='*',
+ help='base of the modules to remove')
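+
+# Example invocations (module names are hypothetical):
+#   --all        remove every generated module
+#   mymodule     remove the generated modules whose file names start with 'mymodule'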
+
+try:
+ args = parser.parse_args()
+except:
+ sys.exit(0)
+
+moduleFiles = set()
+
+if args.all:
+ base = os.path.join(generated_dir, '*.so')
+ for filename in glob.iglob( base ):
+ os.remove( filename )
+ os.remove( os.path.splitext(filename)[0]+'.cc' )
+ moduleFiles.update( [os.path.splitext(os.path.basename(filename))[0]] )
+elif len(args.modules)>0:
+ for m in args.modules:
+ base = os.path.join(generated_dir, m+'*')
+ for filename in glob.iglob( base ):
+ os.remove( filename )
+ moduleFiles.update( [os.path.splitext(os.path.basename(filename))[0]] )
+else:
+ parser.print_help()
+ sys.exit(0)
+
+for line in fileinput.input( os.path.join(generated_dir, 'CMakeLists.txt'), inplace = True):
+ if not any( [m in line for m in moduleFiles] ):
+ print(line, end="")
--- /dev/null
+#!/usr/bin/env python3
+import getopt
+import os
+import shlex
+import subprocess
+import sys
+import shutil
+import logging
+
+logger = logging.getLogger(__name__)
+
+try:
+ from dune.common.module import build_dune_py_module, get_dune_py_dir, make_dune_py_module, select_modules, resolve_dependencies, resolve_order
+ from dune.common.locking import Lock, LOCK_EX
+except ImportError:
+ import os
+ here = os.path.dirname(os.path.abspath(__file__))
+ modsA = os.path.join(os.path.dirname(here), "python", "dune")
+ modsB = os.path.join(modsA,"common")
+ sys.path.append(modsB)
+ sys.path.append(modsA)
+ if os.path.exists(os.path.join(modsB, "module.py")):
+ from module import build_dune_py_module, get_dune_py_dir, make_dune_py_module, select_modules, resolve_dependencies, resolve_order
+ from locking import Lock, LOCK_EX
+ else:
+ raise
+
+def buffer_to_str(b):
+ return b if sys.version_info.major == 2 else b.decode('utf-8')
+
+def toBuildDir(builddir, moddir, module):
+ if os.path.isabs(builddir):
+ return os.path.join(builddir ,module)
+ else:
+ return os.path.join(moddir, builddir)
+
+def main(argv):
+ try:
+ opts, args = getopt.getopt(argv,"ho",["opts=","builddir=","module="])
+ except getopt.GetoptError:
+        print('usage: setup-dunepy.py [-o config.opts | --opts=config.opts] [--builddir=dir] [--module=mod] [install]')
+ sys.exit(2)
+
+ optsfile = None
+ builddir = None
+ masterModule = None
+ for opt, arg in opts:
+ if opt == '-h':
+            print('usage: setup-dunepy.py [-o config.opts | --opts=config.opts] [--builddir=dir] [--module=mod] [install]')
+ sys.exit(2)
+ elif opt in ("-o", "--opts"):
+ optsfile = arg
+ elif opt in ("--builddir",):
+ builddir = arg
+ elif opt in ("--module",):
+ masterModule = arg
+ if len(args) > 0:
+ execute = args[0]
+ else:
+ execute = ""
+
+    # see if the standard Dune environment variable for the opts file is defined
+ if optsfile is None:
+ optsfile = os.environ.get('DUNE_OPTS_FILE', None)
+
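+    # A sketch of a config.opts file this script can consume; the values are
+    # hypothetical, but CMAKE_FLAGS, BUILDDIR and DUNE_BUILDDIR are the
+    # variables that are sourced from it below:
+    #
+    #   CMAKE_FLAGS="-DCMAKE_BUILD_TYPE=Release"
+    #   BUILDDIR=build-cmake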
+ if optsfile is not None:
+ definitions = {}
+ command = ['bash', '-c', 'source ' + optsfile + ' && echo "$CMAKE_FLAGS"']
+ proc = subprocess.Popen(command, stdout = subprocess.PIPE)
+ stdout, _ = proc.communicate()
+ cmake_args = shlex.split(buffer_to_str(stdout))
+ if builddir is None:
+            # get the build dir (check for BUILDDIR and DUNE_BUILDDIR in the
+            # opts file and then for DUNE_BUILDDIR in the environment)
+ command = ['bash', '-c', 'source ' + optsfile + ' && echo "$BUILDDIR"']
+ proc = subprocess.Popen(command, stdout = subprocess.PIPE)
+ stdout, _ = proc.communicate()
+ builddir = buffer_to_str(stdout).strip()
+ if not builddir:
+ command = ['bash', '-c', 'source ' + optsfile + ' && echo "$DUNE_BUILDDIR"']
+ proc = subprocess.Popen(command, stdout = subprocess.PIPE)
+ stdout, _ = proc.communicate()
+ builddir = buffer_to_str(stdout).strip()
+ if not builddir:
+ builddir = os.environ.get('DUNE_BUILDDIR', 'build-cmake')
+ else:
+ cmake_args = None
+ if builddir is None:
+ builddir = os.environ.get('DUNE_BUILDDIR', 'build-cmake')
+
+ # Generate list of all modules
+ duneModules = select_modules()
+
+ # Generate list of dependencies for dune-py. If --module=mod is passed,
+ # use mod and all its dependencies only. Otherwise use all found modules
+ # as dependencies.
+ if masterModule is None:
+ deps = resolve_order(duneModules[0])
+ else:
+ depsList = resolve_dependencies(duneModules[0], masterModule)
+ deps = {k:v for k,v in duneModules[0].items() if k in depsList}
+ deps = resolve_order(deps)
+ deps += [masterModule]
+
+ if execute == "install":
+ for m in deps:
+ moddir = duneModules[1][m]
+ pythonModule = toBuildDir(builddir,moddir,m)
+ print("calling install_python for %s (%s)" % (m,pythonModule))
+ try:
+ command = ['cmake', '--build', '.', '--target', 'install_python']
+ proc = subprocess.Popen(command, cwd=pythonModule, stdout = subprocess.PIPE)
+ stdout, stderr = proc.communicate()
+ logger.debug(buffer_to_str(stdout))
+ except FileNotFoundError:
+                print("Warning: build directory not found; the module may already be installed, in which case its Python bindings should already be available")
+
+ dunepy = get_dune_py_dir()
+ dunepyBase = os.path.realpath( os.path.join(dunepy,"..") )
+ if not os.path.exists(dunepyBase):
+ os.makedirs(dunepyBase)
+ with Lock(os.path.join(dunepyBase, 'lock-module.lock'), flags=LOCK_EX):
+ if os.path.exists(dunepy):
+ shutil.rmtree(dunepy)
+ os.makedirs(dunepy)
+ foundModule = make_dune_py_module(dunepy, deps)
+ output = build_dune_py_module(dunepy, cmake_args, None, builddir, deps, writetagfile=True)
+
+ print("CMake output")
+ print(output)
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
--- /dev/null
+add_subdirectory(modules)
+add_subdirectory(scripts)
--- /dev/null
+# Defines the functions to use BLAS/Lapack
+#
+# .. cmake_function:: add_dune_blas_lapack_flags
+#
+# .. cmake_param:: targets
+# :positional:
+# :single:
+# :required:
+#
+# A list of targets to use BLAS/Lapack with.
+#
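+# A minimal usage sketch (``myapp`` is a hypothetical target name)::
+#
+#    add_executable(myapp myapp.cc)
+#    add_dune_blas_lapack_flags(myapp)
+#
+# If LAPACK was not found, the targets are linked against BLAS alone (when
+# available).
+#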
+include_guard(GLOBAL)
+
+set_package_properties("BLAS" PROPERTIES
+ DESCRIPTION "fast linear algebra routines")
+set_package_properties("LAPACK" PROPERTIES
+ DESCRIPTION "fast linear algebra routines")
+
+# register HAVE_BLAS and HAVE_LAPACK for config.h
+set(HAVE_BLAS ${BLAS_FOUND})
+set(HAVE_LAPACK ${LAPACK_FOUND})
+
+# register Lapack library as dune package
+if(HAVE_LAPACK)
+ dune_register_package_flags(LIBRARIES "${LAPACK_LIBRARIES}")
+ cmake_push_check_state()
+ set(CMAKE_REQUIRED_LIBRARIES ${LAPACK_LIBRARIES})
+ check_function_exists("dsyev_" LAPACK_NEEDS_UNDERLINE)
+ cmake_pop_check_state()
+elseif(HAVE_BLAS)
+ dune_register_package_flags(LIBRARIES "${BLAS_LIBRARIES}")
+endif()
+
+# add function to link against the BLAS/Lapack library
+function(add_dune_blas_lapack_flags _targets)
+ foreach(_target ${_targets})
+ if(LAPACK_FOUND)
+ target_link_libraries(${_target} PUBLIC ${LAPACK_LIBRARIES})
+ elseif(BLAS_FOUND)
+ target_link_libraries(${_target} PUBLIC ${BLAS_LIBRARIES})
+ endif()
+ endforeach(_target)
+endfunction(add_dune_blas_lapack_flags)
--- /dev/null
+# Defines the functions to use GMP
+#
+# .. cmake_function:: add_dune_gmp_flags
+#
+# .. cmake_param:: targets
+# :positional:
+# :single:
+# :required:
+#
+# A list of targets to use GMP with.
+#
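+# A minimal usage sketch (``myapp`` is a hypothetical target name)::
+#
+#    add_executable(myapp myapp.cc)
+#    add_dune_gmp_flags(myapp)
+#
+# Besides linking against ``GMP::gmpxx``, this adds the compile definition
+# ``ENABLE_GMP=1`` to the targets.
+#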
+include_guard(GLOBAL)
+
+# set HAVE_GMP for the config.h file
+set(HAVE_GMP ${GMP_FOUND})
+
+# register all GMP related flags
+if(GMP_FOUND)
+ dune_register_package_flags(
+ LIBRARIES GMP::gmpxx
+ COMPILE_DEFINITIONS "ENABLE_GMP=1"
+ )
+endif()
+
+# add function to link against the GMP library
+function(add_dune_gmp_flags _targets)
+ if(GMP_FOUND)
+ foreach(_target ${_targets})
+ target_link_libraries(${_target} PUBLIC GMP::gmpxx)
+ target_compile_definitions(${_target} PUBLIC ENABLE_GMP=1)
+ endforeach(_target ${_targets})
+ endif(GMP_FOUND)
+endfunction(add_dune_gmp_flags)
--- /dev/null
+# Defines the functions to use METIS
+#
+# .. cmake_function:: add_dune_metis_flags
+#
+# .. cmake_param:: targets
+# :positional:
+# :single:
+# :required:
+#
+# A list of targets to use METIS with.
+#
+include_guard(GLOBAL)
+
+# register HAVE_METIS for config.h
+set(HAVE_METIS ${METIS_FOUND})
+
+# register METIS library as dune package
+if(METIS_FOUND)
+ dune_register_package_flags(LIBRARIES METIS::METIS)
+endif()
+
+# Add function to link targets against METIS library
+function(add_dune_metis_flags _targets)
+ if(METIS_FOUND)
+ foreach(_target ${_targets})
+ target_link_libraries(${_target} PUBLIC METIS::METIS)
+ endforeach(_target)
+ endif()
+endfunction(add_dune_metis_flags _targets)
--- /dev/null
+# The DUNE way to compile MPI applications is to use the C++
+# compiler with the MPI flags usually used for C. The MPI C++
+# bindings are deactivated to prevent ABI problems.
+#
+# .. cmake_function:: add_dune_mpi_flags
+#
+# .. cmake_param:: targets
+# :single:
+# :required:
+# :positional:
+#
+# The target list to add the MPI flags to.
+#
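+# A minimal usage sketch (``myapp`` is a hypothetical target name)::
+#
+#    add_executable(myapp myapp.cc)
+#    add_dune_mpi_flags(myapp)
+#
+# If MPI was found, the targets are linked against ``MPI::MPI_C`` and get the
+# compile definition ``ENABLE_MPI=1``.
+#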
+include_guard(GLOBAL)
+
+# text for feature summary
+set_package_properties("MPI" PROPERTIES
+ DESCRIPTION "Message Passing Interface library"
+ PURPOSE "Parallel programming on multiple processors")
+
+if(MPI_C_FOUND)
+ set(HAVE_MPI ${MPI_C_FOUND})
+
+ dune_register_package_flags(COMPILE_DEFINITIONS "ENABLE_MPI=1"
+ LIBRARIES MPI::MPI_C)
+endif()
+
+# adds MPI flags to the targets
+function(add_dune_mpi_flags)
+ cmake_parse_arguments(ADD_MPI "SOURCE_ONLY;OBJECT" "" "" ${ARGN}) # ignored
+ set(targets ${ADD_MPI_UNPARSED_ARGUMENTS})
+
+ if(MPI_C_FOUND)
+ foreach(target ${targets})
+ target_link_libraries(${target} PUBLIC MPI::MPI_C)
+ target_compile_definitions(${target} PUBLIC "ENABLE_MPI=1")
+ endforeach(target)
+ endif()
+endfunction(add_dune_mpi_flags)
--- /dev/null
+# Defines the functions to use PTScotch
+#
+# .. cmake_function:: add_dune_ptscotch_flags
+#
+# .. cmake_param:: targets
+# :positional:
+# :single:
+# :required:
+#
+# A list of targets to use PTScotch with.
+#
+include_guard(GLOBAL)
+
+# set HAVE_PTSCOTCH for config.h
+set(HAVE_PTSCOTCH ${PTScotch_FOUND})
+
+# register all PTScotch related flags
+if(PTScotch_SCOTCH_FOUND)
+ dune_register_package_flags(LIBRARIES PTScotch::Scotch)
+endif()
+if(PTScotch_PTSCOTCH_FOUND)
+ dune_register_package_flags(LIBRARIES PTScotch::PTScotch)
+endif()
+
+function(add_dune_ptscotch_flags _targets)
+ if(PTScotch_SCOTCH_FOUND)
+ foreach(_target ${_targets})
+ target_link_libraries(${_target} PUBLIC PTScotch::Scotch)
+ endforeach(_target ${_targets})
+ endif()
+ if(PTScotch_PTSCOTCH_FOUND)
+ foreach(_target ${_targets})
+ target_link_libraries(${_target} PUBLIC PTScotch::PTScotch)
+ endforeach(_target ${_targets})
+ endif()
+endfunction(add_dune_ptscotch_flags)
--- /dev/null
+# Defines the functions to use ParMETIS
+#
+# .. cmake_function:: add_dune_parmetis_flags
+#
+# .. cmake_param:: targets
+# :positional:
+# :single:
+# :required:
+#
+# A list of targets to use ParMETIS with.
+#
+include_guard(GLOBAL)
+
+# set HAVE_PARMETIS for config.h
+set(HAVE_PARMETIS ${ParMETIS_FOUND})
+
+# register all ParMETIS related flags
+if(ParMETIS_FOUND)
+ dune_register_package_flags(LIBRARIES ParMETIS::ParMETIS)
+endif()
+
+# add function to link against the ParMETIS library
+function(add_dune_parmetis_flags _targets)
+ if(ParMETIS_FOUND)
+ foreach(_target ${_targets})
+ target_link_libraries(${_target} PUBLIC ParMETIS::ParMETIS)
+ endforeach(_target)
+ endif()
+endfunction(add_dune_parmetis_flags)
--- /dev/null
+# Defines the functions to use QuadMath
+#
+# .. cmake_function:: add_dune_quadmath_flags
+#
+# .. cmake_param:: targets
+# :positional:
+# :single:
+# :required:
+#
+# A list of targets to use QuadMath with.
+#
+include_guard(GLOBAL)
+
+# set HAVE_QUADMATH for config.h
+set(HAVE_QUADMATH ${QuadMath_FOUND})
+
+# register the QuadMath imported target
+if(QuadMath_FOUND)
+ dune_register_package_flags(
+ LIBRARIES QuadMath::QuadMath
+ COMPILE_DEFINITIONS "ENABLE_QUADMATH=1"
+ )
+endif()
+
+# add function to link against QuadMath::QuadMath
+function(add_dune_quadmath_flags _targets)
+ if(QuadMath_FOUND)
+ foreach(_target ${_targets})
+ target_link_libraries(${_target} PUBLIC QuadMath::QuadMath)
+ target_compile_definitions(${_target} PUBLIC ENABLE_QUADMATH=1)
+ endforeach(_target ${_targets})
+ endif()
+endfunction(add_dune_quadmath_flags)
--- /dev/null
+# Defines the functions to use SuiteSparse
+#
+# .. cmake_function:: add_dune_suitesparse_flags
+#
+# .. cmake_param:: targets
+# :positional:
+# :single:
+# :required:
+#
+# A list of targets to use SuiteSparse with.
+#
+include_guard(GLOBAL)
+
+# set HAVE_SUITESPARSE for config.h
+set(HAVE_SUITESPARSE ${SuiteSparse_FOUND})
+set(HAVE_UMFPACK ${SuiteSparse_UMFPACK_FOUND})
+
+# register all SuiteSparse related flags
+if(SuiteSparse_FOUND)
+ dune_register_package_flags(
+ COMPILE_DEFINITIONS "ENABLE_SUITESPARSE=1"
+ LIBRARIES SuiteSparse::SuiteSparse)
+endif()
+
+# Provide function to set target properties for linking to SuiteSparse
+function(add_dune_suitesparse_flags _targets)
+ if(SuiteSparse_FOUND)
+ foreach(_target ${_targets})
+ target_link_libraries(${_target} PUBLIC SuiteSparse::SuiteSparse)
+ target_compile_definitions(${_target} PUBLIC ENABLE_SUITESPARSE=1)
+ endforeach(_target)
+ endif()
+endfunction(add_dune_suitesparse_flags)
--- /dev/null
+# Defines the functions to use TBB
+#
+# .. cmake_function:: add_dune_tbb_flags
+#
+# .. cmake_param:: targets
+# :positional:
+# :single:
+# :required:
+#
+# A list of targets to use TBB with.
+#
+include_guard(GLOBAL)
+
+# set variable for config.h
+set(HAVE_TBB ${TBB_FOUND})
+
+# perform DUNE-specific setup tasks
+if (TBB_FOUND)
+ dune_register_package_flags(
+ COMPILE_DEFINITIONS ENABLE_TBB=1
+ LIBRARIES TBB::tbb
+ )
+endif()
+
+# function for adding TBB flags to a list of targets
+function(add_dune_tbb_flags _targets)
+ if(TBB_FOUND)
+ foreach(_target ${_targets})
+ target_link_libraries(${_target} PUBLIC TBB::tbb)
+ target_compile_definitions(${_target} PUBLIC ENABLE_TBB=1)
+ endforeach(_target)
+ endif()
+endfunction(add_dune_tbb_flags)
--- /dev/null
+include_guard(GLOBAL)
+
+# text for feature summary
+set_package_properties("Threads" PROPERTIES
+ DESCRIPTION "Multi-threading library")
+
+# set HAVE_THREADS for config.h
+set(HAVE_THREADS ${Threads_FOUND})
+
+# register the Threads imported target globally
+if(Threads_FOUND)
+ link_libraries(Threads::Threads)
+endif()
--- /dev/null
+# Defines the functions to use Vc
+#
+# Vc is a library for high-level vectorization support in C++
+# see https://github.com/VcDevel/Vc
+#
+# .. cmake_function:: add_dune_vc_flags
+#
+# .. cmake_param:: targets
+# :positional:
+# :single:
+# :required:
+#
+# A list of targets to use VC with.
+#
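+# A minimal usage sketch (``myapp`` is a hypothetical target name)::
+#
+#    add_executable(myapp myapp.cc)
+#    add_dune_vc_flags(myapp)
+#
+# Besides linking against the Vc libraries, this adds the Vc compile flags,
+# the compile definition ``ENABLE_VC=1`` and the Vc include directories to
+# the targets.
+#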
+include_guard(GLOBAL)
+
+# text for feature summary
+set_package_properties("Vc" PROPERTIES
+ DESCRIPTION "C++ Vectorization library"
+ URL "https://github.com/VcDevel/Vc"
+ PURPOSE "For use of SIMD instructions")
+
+function(add_dune_vc_flags _targets)
+ if(Vc_FOUND)
+ foreach(_target ${_targets})
+ target_link_libraries(${_target} PUBLIC ${Vc_LIBRARIES})
+ target_compile_options(${_target} PUBLIC ${Vc_COMPILE_FLAGS})
+ target_compile_definitions(${_target} PUBLIC ENABLE_VC=1)
+ target_include_directories(${_target} SYSTEM PUBLIC ${Vc_INCLUDE_DIR})
+ endforeach(_target ${_targets})
+ endif(Vc_FOUND)
+endfunction(add_dune_vc_flags)
+
+if(Vc_FOUND)
+ dune_register_package_flags(COMPILE_OPTIONS "${Vc_COMPILE_FLAGS};-DENABLE_VC=1"
+ LIBRARIES "${Vc_LIBRARIES}"
+ INCLUDE_DIRS "${Vc_INCLUDE_DIR}")
+endif(Vc_FOUND)
+set(HAVE_VC ${Vc_FOUND})
--- /dev/null
+# This module contains only documentation for CMake builtins.
+# This is necessary to have a complete API documentation.
+#
+# .. cmake_function:: add_subdirectory
+#
+# .. cmake_param:: dir
+# :single:
+# :positional:
+# :required:
+#
+# The :code:`CMakeLists.txt` file from this subdirectory
+# will be executed next.
+#
+# .. cmake_param:: EXCLUDE_FROM_ALL
+# :option:
+#
+# If given, targets added in this subdirectory are not built during
+# :code:`make all` by default.
+#
+# This is a cmake builtin command.
+# For detailed information, check the cmake documentation:
+#
+# ::
+#
+# cmake --help-command add_subdirectory
+#
+# .. cmake_function:: install
+#
+# Define installation rules to customize the behaviour of :code:`make install`.
+#
+# This is a cmake builtin command.
+# For detailed information, check the cmake documentation:
+#
+# ::
+#
+# cmake --help-command install
+#
+# .. cmake_function:: add_executable
+#
+# Adds an executable to the project.
+#
+# This is a cmake builtin command.
+# For detailed information, check the cmake documentation:
+#
+# ::
+#
+# cmake --help-command add_executable
+#
+# .. cmake_variable:: CMAKE_<LANG>_COMPILER
+#
+# Set the compiler for the language LANG.
+# In our case, LANG is one of C or CXX.
+#
+# This is a cmake builtin variable.
+# For detailed information, check the cmake documentation:
+#
+# ::
+#
+# cmake --help-variable CMAKE_\<LANG\>_COMPILER
+#
+# .. cmake_variable:: CMAKE_<LANG>_FLAGS
+#
+# Set the compile flags for the language LANG.
+# In our case, LANG is one of C or CXX.
+#
+# This is a cmake builtin variable.
+# For detailed information, check the cmake documentation:
+#
+# ::
+#
+# cmake --help-variable CMAKE_\<LANG\>_FLAGS
+#
+# .. cmake_function:: find_package
+#
+# Look for an external package.
+#
+# This is a cmake builtin command.
+# For detailed information, check the cmake documentation:
+#
+# ::
+#
+# cmake --help-command find_package
+#
--- /dev/null
+add_subdirectory(FindPkgConfig)
+add_subdirectory(FindPython3)
+
+install(FILES
+ AddBLASLapackFlags.cmake
+ AddGMPFlags.cmake
+ AddMETISFlags.cmake
+ AddMPIFlags.cmake
+ AddParMETISFlags.cmake
+ AddPTScotchFlags.cmake
+ AddQuadMathFlags.cmake
+ AddTBBFlags.cmake
+ AddThreadsFlags.cmake
+ AddSuiteSparseFlags.cmake
+ AddVcFlags.cmake
+ CheckCXXFeatures.cmake
+ CMakeBuiltinFunctionsDocumentation.cmake
+ DuneAddPybind11Module.cmake
+ DuneCMakeCompat.cmake
+ DuneCommonMacros.cmake
+ DuneCxaDemangle.cmake
+ DuneDoc.cmake
+ DuneDoxygen.cmake
+ DuneEnableAllPackages.cmake
+ DuneExecuteProcess.cmake
+ DuneInstance.cmake
+ DuneMacros.cmake
+ DuneMPI.cmake
+ DunePathHelper.cmake
+ DunePkgConfig.cmake
+ DunePythonCommonMacros.cmake
+ DunePythonFindPackage.cmake
+ DunePythonInstallPackage.cmake
+ DunePythonMacros.cmake
+ DunePythonTestCommand.cmake
+ DunePythonVirtualenv.cmake
+ DuneSphinxDoc.cmake
+ DuneSphinxCMakeDoc.cmake
+ DuneStreams.cmake
+ DuneSymlinkOrCopy.cmake
+ DuneTestMacros.cmake
+ FindGMP.cmake
+ FindInkscape.cmake
+ FindLatexMk.cmake
+ FindMETIS.cmake
+ FindParMETIS.cmake
+ FindPTScotch.cmake
+ FindQuadMath.cmake
+ FindSphinx.cmake
+ FindSuiteSparse.cmake
+ FindTBB.cmake
+ Headercheck.cmake
+ latexmkrc.cmake
+ OverloadCompilerFlags.cmake
+ UseInkscape.cmake
+ UseLatexMk.cmake
+ DESTINATION ${DUNE_INSTALL_MODULEDIR})
--- /dev/null
+# .. cmake_module::
+#
+# Module that checks for supported C++20, C++17 and non-standard features.
+#
+# The behaviour of this module can be modified by the following variable:
+#
+# :ref:`DISABLE_CXX_VERSION_CHECK`
+# Disable checking for std=c++20 (c++23, ...)
+#
+# This module internally sets the following variables, which are then
+# exported into the config.h of the current dune module.
+#
+# :code:`HAS_ATTRIBUTE_UNUSED`
+# True if attribute unused is supported
+#
+# :code:`HAS_ATTRIBUTE_DEPRECATED`
+# True if attribute deprecated is supported
+#
+# :code:`HAS_ATTRIBUTE_DEPRECATED_MSG`
+# True if attribute deprecated("msg") is supported
+#
+# .. cmake_variable:: DISABLE_CXX_VERSION_CHECK
+#
+# You may set this variable to TRUE to disable checking for
+# std=c++20 (c++23, ...). For more details, check :ref:`CheckCXXFeatures`.
+#
+
+
+include(CMakePushCheckState)
+include(CheckCXXCompilerFlag)
+include(CheckIncludeFileCXX)
+include(CheckCXXSourceCompiles)
+include(CheckCXXSymbolExists)
+
+# C++ standard versions that this test knows about
+set(CXX_VERSIONS 20 17)
+
+
+# Compile tests for the different standard revisions; these test both the compiler
+# and the associated library to avoid problems like using a C++20 user-installed
+# compiler together with a non C++20-compliant stdlib from the system compiler.
+
+# we need to escape semicolons in the tests to be able to stick them into a list
+string(REPLACE ";" "\;" cxx_20_test
+ "
+ #include <type_traits>
+
+  // `if constexpr` is a C++17 compiler feature; the C++20-specific part of
+  // this test is the library feature checked in main() below
+ template<typename T>
+ void f()
+ { if constexpr (T::anything) {} }
+
+ int main() {
+ // std::is_bounded_array_v is a C++20 library feature
+ return std::is_bounded_array_v<int[2]>;
+ }
+ ")
+
+string(REPLACE ";" "\;" cxx_17_test
+ "
+ #include <type_traits>
+
+ // nested namespaces are a C++17 compiler feature
+ namespace A::B {
+ using T = int;
+ }
+
+ int main() {
+ // std::void_t is a C++17 library feature
+ return not std::is_same<void,std::void_t<A::B::T> >{};
+ }
+ ")
+
+# build a list out of the pre-escaped tests
+set(CXX_VERSIONS_TEST "${cxx_20_test}" "${cxx_17_test}")
+
+# these are appended to "-std=c++" and tried in this order
+# note the escaped semicolons; that's necessary to create a nested list
+set(CXX_VERSIONS_FLAGS "20\;2a" "17\;1z")
+
+# by default, we enable C++17 for now, but not C++20
+# The user can override this choice by explicitly setting this variable
+set(CXX_MAX_STANDARD 17
+ CACHE STRING
+ "highest version of the C++ standard to enable. This version is also used if the version check is disabled")
+
+
+function(dune_require_cxx_standard)
+ include(CMakeParseArguments)
+
+ cmake_parse_arguments("" "" "MODULE;VERSION" "" ${ARGN})
+
+ if(_UNPARSED_ARGUMENTS)
+ message(WARNING "Unknown arguments in call to dune_require_cxx_standard(${ARGN})")
+ endif()
+
+ if(${_VERSION} GREATER ${CXX_MAX_SUPPORTED_STANDARD})
+
+ if(NOT _MODULE)
+ set(_MODULE "This module")
+ endif()
+
+ if(${_VERSION} GREATER ${CXX_MAX_STANDARD})
+ message(FATAL_ERROR "\
+${_MODULE} requires compiler support for C++${_VERSION}, but the build system is currently \
+set up to not allow newer language standards than C++${CXX_MAX_STANDARD}. Try setting the \
+CMake variable CXX_MAX_STANDARD to at least ${_VERSION}."
+ )
+ else()
+      message(FATAL_ERROR "\
+${_MODULE} requires compiler support for C++${_VERSION}, but the newest standard your \
+compiler passed our compatibility test for is C++${CXX_MAX_SUPPORTED_STANDARD}."
+        )
+ endif()
+ endif()
+endfunction()
+
+
+# try to enable all of the C++ standards that we know about, in descending order
+if(NOT DISABLE_CXX_VERSION_CHECK)
+
+ foreach(version ${CXX_VERSIONS})
+
+ # skip versions that are newer than allowed
+ if(NOT(version GREATER CXX_MAX_STANDARD))
+
+ list(FIND CXX_VERSIONS ${version} version_index)
+ list(GET CXX_VERSIONS_FLAGS ${version_index} version_flags)
+
+ # First try whether the compiler accepts one of the command line flags for this standard
+ foreach(flag ${version_flags})
+
+ set(cxx_std_flag_works "cxx_std_flag_${flag}")
+ check_cxx_compiler_flag("-std=c++${flag}" ${cxx_std_flag_works})
+
+ if(${cxx_std_flag_works})
+ set(cxx_std_flag "-std=c++${flag}")
+ break()
+ endif()
+
+ endforeach()
+
+ # and if it did, run the compile test
+ if(cxx_std_flag)
+
+ list(GET CXX_VERSIONS_TEST ${version_index} version_test)
+ set(test_compiler_output "compiler_supports_cxx${version}")
+
+ cmake_push_check_state()
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${cxx_std_flag}")
+ check_cxx_source_compiles("${version_test}" ${test_compiler_output})
+ cmake_pop_check_state()
+
+ if(${test_compiler_output})
+ set(CXX_MAX_SUPPORTED_STANDARD ${version})
+ set(CMAKE_CXX_FLAGS "${cxx_std_flag} ${CMAKE_CXX_FLAGS}")
+ break()
+ else()
+ # Wipe the variable, as this version of the standard doesn't seem to work
+ unset(cxx_std_flag)
+ endif()
+
+ endif()
+ endif()
+ endforeach()
+
+ if(NOT DEFINED CXX_MAX_SUPPORTED_STANDARD)
+    # None of the checks succeeded; fall back to C++17, the minimum standard required by DUNE
+    message(WARNING "\
+Unable to determine C++ standard support for your compiler, falling back to C++17. \
+If you know that your compiler supports a newer version of the standard, please set the CMake \
+variable DISABLE_CXX_VERSION_CHECK to true and the CMake variable CXX_MAX_STANDARD \
+to the highest version of the standard supported by your compiler (e.g. 20). If your compiler \
+needs custom flags to switch to that standard version, you have to manually add them to \
+CMAKE_CXX_FLAGS."
+ )
+ set(CXX_MAX_SUPPORTED_STANDARD 17)
+ endif()
+else()
+ # We did not check version but need to set maximum supported
+ # version for some checks. Therefore we set it to CXX_MAX_STANDARD.
+ set(CXX_MAX_SUPPORTED_STANDARD ${CXX_MAX_STANDARD})
+endif()
+
+# make sure we have at least C++17
+dune_require_cxx_standard(MODULE "DUNE" VERSION 17)
+
+# perform tests
+
+# __attribute__((unused))
+check_cxx_source_compiles("
+ int main(void)
+ {
+ int __attribute__((unused)) foo;
+ return 0;
+ };
+" HAS_ATTRIBUTE_UNUSED
+)
+
+# __attribute__((deprecated))
+check_cxx_source_compiles("
+#define DEP __attribute__((deprecated))
+ class bar
+ {
+ bar() DEP;
+ };
+
+ class peng { } DEP;
+
+ template <class T>
+ class t_bar
+ {
+ t_bar() DEP;
+ };
+
+ template <class T>
+ class t_peng {
+ t_peng() {};
+ } DEP;
+
+ void foo() DEP;
+
+ void foo() {}
+
+ int main(void)
+ {
+ return 0;
+ };
+" HAS_ATTRIBUTE_DEPRECATED
+)
+
+# __attribute__((deprecated("msg")))
+check_cxx_source_compiles("
+#define DEP __attribute__((deprecated(\"message\")))
+ class bar {
+ bar() DEP;
+ };
+
+ class peng { } DEP;
+
+ template <class T>
+ class t_bar
+ {
+ t_bar() DEP;
+ };
+
+ template <class T>
+ class t_peng
+ {
+ t_peng() {};
+ } DEP;
+
+ void foo() DEP;
+
+ void foo() {}
+
+ int main(void)
+ {
+ return 0;
+ };
+" HAS_ATTRIBUTE_DEPRECATED_MSG
+)
+
+# ******************************************************************************
+#
+# Checks for standard library features
+#
+# While there are __cpp_lib_* feature test macros for all of these, those are
+# unfortunately unreliable, as libc++ does not have feature test macros yet.
+#
+# In order to keep the tests short, they use check_cxx_symbol_exists(). That
+# function can only test for macros and linkable symbols, however, so we wrap
+# tested types into a call to std::move(). That should be safe, as std::move()
+# does not require a complete type.
+#
+# ******************************************************************************
+
+check_cxx_symbol_exists(
+ "std::experimental::make_array<int,int>"
+ "experimental/array"
+ DUNE_HAVE_CXX_EXPERIMENTAL_MAKE_ARRAY
+ )
+
+check_cxx_symbol_exists(
+ "std::move<std::experimental::detected_t<std::decay_t,int>>"
+ "utility;experimental/type_traits"
+ DUNE_HAVE_CXX_EXPERIMENTAL_IS_DETECTED
+ )
+
+check_cxx_symbol_exists(
+ "std::identity"
+ "functional"
+ DUNE_HAVE_CXX_STD_IDENTITY
+ )
--- /dev/null
+# This cmake module provides infrastructure for building modules using Pybind11
+#
+# .. cmake_function:: dune_add_pybind11_module
+#
+# .. cmake_param:: NAME
+# :required:
+# :single:
+#
+# name of the Python module
+#
+# .. cmake_param:: SOURCES
+# :multi:
+#
+# source files to build shared library
+#
+# If this parameter is omitted, <name>.cc will be used if it exists.
+#
+# .. cmake_param:: EXCLUDE_FROM_ALL
+# :option:
+#
+# exclude this module from the all target
+#
+# .. cmake_param:: COMPILE_DEFINITIONS
+# :multi:
+# :argname: def
+#
+# A set of compile definitions to add to the target.
+# Only definitions beyond the application of :ref:`add_dune_all_flags`
+# have to be stated.
+#
+# .. cmake_param:: CMAKE_GUARD
+# :multi:
+# :argname: condition
+#
+# A number of conditions that CMake should evaluate before adding this
+# module. Use this feature instead of guarding the call to
+# :code:`dune_add_pybind11_module` with an :code:`if` clause.
+#
+# The passed condition can be a complex expression like
+# `( A OR B ) AND ( C OR D )`. Mind the spaces around the parentheses.
+#
+# Example: Write CMAKE_GUARD dune-foo_FOUND if you want your module to only
+# build when the dune-foo module is present.
+#
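+# A usage sketch; ``_foo``, ``foo.cc`` and ``dune-foo`` are hypothetical names::
+#
+#    dune_add_pybind11_module(NAME _foo
+#                             SOURCES foo.cc
+#                             CMAKE_GUARD dune-foo_FOUND)
+#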
+include_guard(GLOBAL)
+
+function(dune_add_pybind11_module)
+ include(CMakeParseArguments)
+ cmake_parse_arguments(PYBIND11_MODULE "EXCLUDE_FROM_ALL" "NAME" "SOURCES;COMPILE_DEFINITIONS;CMAKE_GUARD" ${ARGN})
+ if(PYBIND11_MODULE_UNPARSED_ARGUMENTS)
+ message(WARNING "dune_add_pybind11_module: extra arguments provided (typos in named arguments?)")
+ endif()
+
+ if(NOT PYBIND11_MODULE_NAME)
+ message(FATAL_ERROR "dune_add_pybind11_module: module name not specified")
+ endif()
+
+ if(NOT PYBIND11_MODULE_SOURCES)
+ if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${PYBIND11_MODULE_NAME}.cc)
+ set(PYBIND11_MODULE_SOURCES ${PYBIND11_MODULE_NAME}.cc)
+ else()
+ message(FATAL_ERROR "dune_add_pybind11_module: no source files specified")
+ endif()
+ endif()
+
+ foreach(condition ${PYBIND11_MODULE_CMAKE_GUARD})
+ separate_arguments(condition)
+ if(NOT (${condition}))
+ message(STATUS "not building ${PYBIND11_MODULE_NAME}, because condition ${condition} failed.")
+ return()
+ endif()
+ endforeach()
+
+ add_library(${PYBIND11_MODULE_NAME} SHARED ${PYBIND11_MODULE_SOURCES})
+ set_target_properties(${PYBIND11_MODULE_NAME} PROPERTIES PREFIX "")
+
+ # force '.so' as library suffix on macOS due to a problem in Python
+ # https://stackoverflow.com/questions/2488016/how-to-make-python-load-dylib-on-osx
+ # and add -undefined dynamic_lookup flag to linker
+ # https://pybind11.readthedocs.io/en/stable/compiling.html#building-manually
+ if (APPLE)
+ set_target_properties(${PYBIND11_MODULE_NAME} PROPERTIES SUFFIX ".so")
+ target_link_options(${PYBIND11_MODULE_NAME} PRIVATE -undefined dynamic_lookup)
+ endif()
+
+ target_compile_definitions(${PYBIND11_MODULE_NAME} PRIVATE ${PYBIND11_MODULE_COMPILE_DEFINITIONS})
+ dune_target_enable_all_packages(${PYBIND11_MODULE_NAME})
+
+ if(PYBIND11_MODULE_EXCLUDE_FROM_ALL)
+ set_property(TARGET ${PYBIND11_MODULE_NAME} PROPERTY EXCLUDE_FROM_ALL 1)
+ endif()
+endfunction()
--- /dev/null
+# Module with backward compatibility implementation of newer cmake functionality
+#
+# .. cmake_module::
+#
+# This module contains backward compatibility implementations of cmake
+# functionality that is not available in all cmake versions we support.
+#
+# * :ref:`dune_list_filter(...) <dune_list_filter>` for ``list(FILTER
+#   ...)`` from cmake 3.6
+#
+#
+# .. cmake_function:: dune_list_filter
+#
+# .. cmake_brief::
+#
+# Compatibility implementation of ``list(FILTER)``
+#
+# .. cmake_param:: list
+# :positional:
+# :single:
+# :required:
+#
+# Name of list variable used as both input and output.
+#
+# .. cmake_param:: <INCLUDE|EXCLUDE>
+# :positional:
+# :option:
+# :required:
+#
+# Whether to include or to exclude the items matching the regular
+# expression.
+#
+# .. cmake_param:: REGEX
+# :single:
+# :required:
+# :argname: regular_expression
+#
+# The regular expression to match the items against.
+#
+# Match each item in the list against the regular expression. In
+# ``INCLUDE`` mode the result contains all items that matched, in
+# ``EXCLUDE`` mode it contains all items that did not match. Store the
+# result back in the variable ``list`` in the scope of the caller.
+#
+# This is exactly the same as the ``list(FILTER ...)`` command available in
+# cmake 3.6 and onward.
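+#
+# A usage sketch (hypothetical list name and contents)::
+#
+#    set(headers foo.hh foo.cc bar.hh)
+#    dune_list_filter(headers INCLUDE REGEX "\\.hh$")
+#    # headers now contains "foo.hh;bar.hh"
+#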
+include_guard(GLOBAL)
+
+# list(FILTER...) was introduced in cmake 3.6, this is a compatibility
+# implementation for earlier cmakes
+function(dune_list_filter list mode REGEX regular_expression)
+ message(DEPRECATION "dune_list_filter is deprecated and will be removed after Dune 2.8. Use list(FILTER ...) from CMake 3.6")
+
+ # validate arguments
+ if(NOT (("${mode}" STREQUAL "INCLUDE") OR ("${mode}" STREQUAL "EXCLUDE")))
+ message(FATAL_ERROR "unsupported mode '${mode}', must be either INCLUDE or EXCLUDE")
+ endif()
+ if(NOT ("${REGEX}" STREQUAL "REGEX"))
+ message(FATAL_ERROR "dune_list_filter can only filter by regular expression")
+ endif()
+ if("${ARGC}" GREATER 4)
+ message(FATAL_ERROR "extra arguments given: <${ARGN}>")
+ endif()
+
+  # cmake can't distinguish between empty lists and lists with one empty
+  # element. This is a problem when consecutively appending elements to a
+  # list: if the first elements we append are empty, we lose them. The
+ # "non-empty" token makes sure we start with a non-empty list and avoid this
+ # problem.
+ set(matched "non-empty")
+ set(unmatched "non-empty")
+ foreach(item IN LISTS "${list}")
+ # list(APPEND) does not quote the appended item (as of cmake 3.7.2), so do
+ # it manually
+ string(REPLACE [[;]] [[\;]] quoted_item "${item}")
+ if("${item}" MATCHES "${regular_expression}")
+ list(APPEND matched "${quoted_item}")
+ else()
+ list(APPEND unmatched "${quoted_item}")
+ endif()
+ endforeach(item)
+
+ if("${mode}" STREQUAL "INCLUDE")
+ set(result "${matched}")
+ else()
+ set(result "${unmatched}")
+ endif()
+
+ # remove the non-empty token from above. If the proper result would be a
+ # list of one empty element, we have no way of preserving that, it will turn
+ # into an empty list.
+ string(REGEX REPLACE "^non-empty;?" "" result "${result}")
+
+ # export
+ set("${list}" "${result}" PARENT_SCOPE)
+endfunction(dune_list_filter)
--- /dev/null
+# enforce C++14
+dune_require_cxx_standard(MODULE "dune-common" VERSION 14)
+
+include(DuneStreams)
+dune_set_minimal_debug_level()
+
+# search for lapack
+find_package(LAPACK)
+include(AddBLASLapackFlags)
+
+find_package(GMP)
+include(AddGMPFlags)
+find_package(QuadMath)
+include(AddQuadMathFlags)
+
+# find program for image manipulation
+find_package(Inkscape)
+include(UseInkscape)
+
+# find the threading library
+find_package(Threads)
+include(AddThreadsFlags)
+
+# find the MPI library
+find_package(MPI 3.0 COMPONENTS C)
+include(AddMPIFlags)
+
+# find library for Threading Building Blocks
+find_package(TBB)
+include(AddTBBFlags)
+
+# find libraries for graph partitioning
+find_package(PTScotch)
+include(AddPTScotchFlags)
+find_package(METIS)
+include(AddMETISFlags)
+find_package(ParMETIS 4.0)
+include(AddParMETISFlags)
+
+# try to find the Vc library
+set(MINIMUM_VC_VERSION)
+if((CMAKE_CXX_COMPILER_ID STREQUAL Clang) AND
+ (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7))
+ message("Raising minimum acceptable Vc version to 1.4.1 due to use of Clang 7 (or later), see https://gitlab.dune-project.org/core/dune-common/issues/132")
+ set(MINIMUM_VC_VERSION 1.4.1)
+endif()
+find_package(Vc ${MINIMUM_VC_VERSION} NO_MODULE)
+include(AddVcFlags)
+
+# Run the python extension of the Dune cmake build system
+include(DunePythonCommonMacros)
--- /dev/null
+message(DEPRECATION
+ "The cmake file 'DuneCxaDemangle.cmake' is deprecated. "
+ "Include the 'dune/common/classname.hh' header if you want to ensure that "
+ "HAVE_CXA_DEMANGLE is defined")
--- /dev/null
+#
+# Module that provides a custom target :code:`make doc` at the top-level
+# directory, plus utility macros for creating install directives that make
+# sure the files to be installed have been generated even if
+# :code:`make doc` was not called.
+#
+# All documentation (LaTeX, Doxygen) will be generated during
+# :code:`make doc`.
+# It provides the following macros:
+#
+# .. cmake_function:: dune_add_latex_document
+#
+# .. cmake_brief::
+#
+# wrapper around add_latex_document for compatibility reasons
+#
+# .. cmake_function:: create_doc_install
+#
+# .. cmake_brief::
+#
+# creates a target for creating and installing a file
+# to a given directory.
+#
+# .. cmake_param:: filename
+# :single:
+# :required:
+# :positional:
+#
+# The name of the file to be installed.
+#
+# .. cmake_param:: targetdir
+# :single:
+# :required:
+# :positional:
+#
+# The directory into which the aforementioned file will be installed.
+#
+# .. cmake_param:: dependency
+# :single:
+# :required:
+# :positional:
+#
+# A target that gets called to create the file that will be installed.
+#
+# .. note::
+#
+# This macro is needed, as we cannot add dependencies to the install
+# target. See https://gitlab.kitware.com/cmake/cmake/issues/8438
+# and https://gitlab.dune-project.org/core/dune-common/issues/36
+#
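+# A usage sketch of :code:`create_doc_install`; the file, directory and
+# target names are hypothetical::
+#
+#    create_doc_install(${CMAKE_CURRENT_BINARY_DIR}/manual.pdf
+#                       ${CMAKE_INSTALL_DOCDIR} manual_pdf)
+#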
+include_guard(GLOBAL)
+
+include(UseLatexMk)
+
+if (LATEXMK_FOUND AND PDFLATEX_COMPILER)
+ set(LATEX_USABLE TRUE)
+endif()
+
+add_custom_target(doc)
+
+# add the Sphinx-generated build system documentation
+include(DuneSphinxCMakeDoc)
+# Support building documentation with doxygen.
+include(DuneDoxygen)
+
+macro(create_doc_install filename targetdir dependency)
+ if(LATEX_USABLE)
+ dune_module_path(MODULE dune-common RESULT scriptdir SCRIPT_DIR)
+ get_filename_component(targetfile ${filename} NAME)
+ set(install_command ${CMAKE_COMMAND} -D FILES=${filename} -D DIR=${CMAKE_INSTALL_PREFIX}/${targetdir} -P ${scriptdir}/InstallFile.cmake)
+
+ # create a custom target for the installation
+ add_custom_target(install_${targetfile} ${install_command}
+ COMMENT "Installing ${filename} to ${targetdir}"
+ DEPENDS ${dependency})
+ # When installing, call cmake install with the above install target and add the file to install_manifest.txt
+ install(CODE "execute_process(COMMAND \"${CMAKE_COMMAND}\" --build \"${CMAKE_BINARY_DIR}\" --target install_${targetfile} )
+ LIST(APPEND CMAKE_INSTALL_MANIFEST_FILES ${CMAKE_INSTALL_PREFIX}/${targetdir}/${targetfile})")
+ endif()
+endmacro(create_doc_install)
+
+macro(dune_add_latex_document)
+ add_latex_document(${ARGN})
+endmacro(dune_add_latex_document)
--- /dev/null
+# Module for building documentation using doxygen.
+#
+# .. cmake_function:: add_doxygen_target
+#
+# .. cmake_param:: TARGET
+# :single:
+#
+# The suffix to add to the target name; defaults to the module name.
+#
+# .. cmake_param:: DEPENDS
+# :multi:
+#
+# A list of further dependencies of the doxygen documentation.
+# Might include :code:`mainpage.txt`.
+#
+# .. cmake_param:: OUTPUT
+# :single:
+#
+# Name of the doxygen output file or directory; needed if you do not generate HTML.
+#
+# This macro creates a target for building (:code:`doxygen_${ProjectName}`) and installing
+# (:code:`doxygen_install_${ProjectName}`) the generated doxygen documentation.
+# The documentation is built during the top-level :code:`make doc` call. We have added a dependency
+# that makes sure it is built before running :code:`make install`.
+#
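+# A usage sketch, as placed by duneproject into a module's
+# doc/doxygen/CMakeLists.txt::
+#
+#    add_doxygen_target()
+#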
+include_guard(GLOBAL)
+
+find_package(Doxygen)
+set_package_properties("Doxygen" PROPERTIES
+ DESCRIPTION "Class documentation generator"
+ URL "www.doxygen.org"
+ PURPOSE "To generate the class documentation from C++ sources")
+include(CMakeParseArguments)
+
+# Set DOT_TRUE for the Doxyfile generation.
+if (NOT DOXYGEN_DOT_FOUND)
+ set(DOT_TRUE '\#')
+endif()
+
+add_custom_target(doxygen_install)
+
+#
+# prepare_doxyfile()
+# This function adds the necessary routines for generating the
+# Doxyfile[.in] files needed by doxygen.
+macro(prepare_doxyfile)
+ message(STATUS "using ${DOXYSTYLE_FILE} to create doxystyle file")
+ message(STATUS "using C macro definitions from ${DOXYGENMACROS_FILE} for Doxygen")
+
+ # check whether module has a Doxylocal file
+ find_file(_DOXYLOCAL Doxylocal PATHS ${CMAKE_CURRENT_SOURCE_DIR} NO_DEFAULT_PATH)
+
+ if(_DOXYLOCAL)
+ set(make_doxyfile_command ${CMAKE_COMMAND} -D DOT_TRUE=${DOT_TRUE} -D DUNE_MOD_NAME=${ProjectName} -D DUNE_MOD_VERSION=${ProjectVersion} -D DOXYSTYLE=${DOXYSTYLE_FILE} -D DOXYGENMACROS=${DOXYGENMACROS_FILE} -D DOXYLOCAL=${CMAKE_CURRENT_SOURCE_DIR}/Doxylocal -D abs_top_srcdir=${CMAKE_SOURCE_DIR} -D srcdir=${CMAKE_CURRENT_SOURCE_DIR} -D top_srcdir=${CMAKE_SOURCE_DIR} -P ${scriptdir}/CreateDoxyFile.cmake)
+ add_custom_command(OUTPUT Doxyfile.in Doxyfile
+ COMMAND ${make_doxyfile_command}
+ COMMENT "Creating Doxyfile.in"
+ DEPENDS ${DOXYSTYLE_FILE} ${DOXYGENMACROS_FILE} ${CMAKE_CURRENT_SOURCE_DIR}/Doxylocal)
+ else()
+ set(make_doxyfile_command ${CMAKE_COMMAND} -D DOT_TRUE=${DOT_TRUE} -D DUNE_MOD_NAME=${ProjectName} -D DUNE_MOD_VERSION=${DUNE_MOD_VERSION} -D DOXYSTYLE=${DOXYSTYLE_FILE} -D DOXYGENMACROS=${DOXYGENMACROS_FILE} -D abs_top_srcdir=${CMAKE_SOURCE_DIR} -D top_srcdir=${CMAKE_SOURCE_DIR} -P ${scriptdir}/CreateDoxyFile.cmake)
+ add_custom_command(OUTPUT Doxyfile.in Doxyfile
+ COMMAND ${make_doxyfile_command}
+ COMMENT "Creating Doxyfile.in"
+ DEPENDS ${DOXYSTYLE_FILE} ${DOXYGENMACROS_FILE})
+ endif()
+ add_custom_target(doxyfile DEPENDS Doxyfile.in Doxyfile)
+endmacro(prepare_doxyfile)
+
+macro(add_doxygen_target)
+ set(options )
+ set(oneValueArgs TARGET OUTPUT)
+ set(multiValueArgs DEPENDS)
+ cmake_parse_arguments(DOXYGEN "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
+
+ # default target name is the module name
+ if(NOT DOXYGEN_TARGET)
+ set(DOXYGEN_TARGET ${ProjectName})
+ endif()
+
+ # default output is html
+ if(NOT DOXYGEN_OUTPUT)
+ set(DOXYGEN_OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/html")
+ endif()
+
+ dune_module_path(MODULE dune-common RESULT scriptdir SCRIPT_DIR)
+ if(PROJECT_NAME STREQUAL "dune-common")
+ set(DOXYSTYLE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/Doxystyle)
+ set(DOXYGENMACROS_FILE ${CMAKE_CURRENT_SOURCE_DIR}/doxygen-macros)
+ endif()
+ message(STATUS "Using scripts from ${scriptdir} for creating doxygen stuff.")
+
+ if(DOXYGEN_FOUND)
+ prepare_doxyfile()
+ # custom command that executes doxygen
+ add_custom_command(OUTPUT ${DOXYGEN_OUTPUT}
+ COMMAND ${CMAKE_COMMAND} -D DOXYGEN_EXECUTABLE=${DOXYGEN_EXECUTABLE} -P ${scriptdir}/RunDoxygen.cmake
+ COMMENT "Building doxygen documentation. This may take a while"
+ DEPENDS Doxyfile.in ${DOXYGEN_DEPENDS})
+ # Create a target for building the doxygen documentation of a module,
+ # that is run during make doc
+ add_custom_target(doxygen_${DOXYGEN_TARGET}
+ DEPENDS ${DOXYGEN_OUTPUT})
+ add_dependencies(doc doxygen_${DOXYGEN_TARGET})
+
+ # Use a cmake call to install the doxygen documentation and create a
+ # target for it
+ include(GNUInstallDirs)
+ # When installing call cmake install with the above install target
+ install(CODE
+ "execute_process(COMMAND ${CMAKE_COMMAND} --build ${CMAKE_BINARY_DIR} --target doxygen_${ProjectName}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+ file(GLOB doxygenfiles
+ GLOB ${CMAKE_CURRENT_BINARY_DIR}/html/*.html
+ ${CMAKE_CURRENT_BINARY_DIR}/html/*.js
+ ${CMAKE_CURRENT_BINARY_DIR}/html/*.png
+ ${CMAKE_CURRENT_BINARY_DIR}/html/*.css
+ ${CMAKE_CURRENT_BINARY_DIR}/html/*.gif
+ ${CMAKE_CURRENT_BINARY_DIR}/*.tag
+ )
+ set(doxygenfiles \"\${doxygenfiles}\")
+ foreach(_file \${doxygenfiles})
+ get_filename_component(_basename \${_file} NAME)
+ LIST(APPEND CMAKE_INSTALL_MANIFEST_FILES ${CMAKE_INSTALL_FULL_DOCDIR}/doxygen/\${_basename})
+ endforeach()
+ file(INSTALL \${doxygenfiles} DESTINATION ${CMAKE_INSTALL_FULL_DOCDIR}/doxygen)
+ message(STATUS \"Installed doxygen into ${CMAKE_INSTALL_FULL_DOCDIR}/doxygen\")")
+ endif()
+endmacro(add_doxygen_target)
--- /dev/null
+# Implementation of a simplified CMake build system.
+#
+# .. cmake_function:: dune_enable_all_packages
+#
+# .. cmake_brief::
+#
+# Previously, the DUNE build system relied on the user to choose and add the compile and link flags
+# necessary to build an executable. While this offers full control to the user, it
+# is an error-prone procedure.
+#
+# Alternatively, users may use this function to simply add the compile flags for all
+# found external modules to all executables in a DUNE module. Likewise, all found libraries are
+# linked to all targets.
+#
+# .. cmake_param:: INCLUDE_DIRS
+# :multi:
+#
+# A list of include directories that should be added to all targets.
+# In a standard Dune module, it is not necessary to specify anything.
+#
+# .. cmake_param:: COMPILE_DEFINITIONS
+# :multi:
+#
+# A list of compile definitions that should be added to all targets.
+# In a standard Dune module, it is not necessary to specify anything.
+#
+# .. cmake_param:: COMPILE_OPTIONS
+# :multi:
+#
+# A list of non-definition compile options that should be added to all targets.
+# In a standard Dune module, it is not necessary to specify anything.
+#
+# .. cmake_param:: MODULE_LIBRARIES
+# :multi:
+#
+# If your module contains libraries as well as programs and if the programs should automatically
+# link to those libraries, you *MUST* list these libraries in :code:`MODULE_LIBRARIES`. Those libraries will be
+# automatically created by :ref:`dune_enable_all_packages` (which internally calls :ref:`dune_add_library`) and placed
+# in the lib/ directory. The order of the libraries matters: if one library depends on another one, it must
+# be listed after its dependency. This special handling of the libraries is due to the way CMake
+# handles linking (in particular policies CMP0022 and CMP0038). You can later add source files to the library
+# anywhere in the source tree by calling :ref:`dune_library_add_sources`.
+#
+# .. cmake_param:: VERBOSE
+# :option:
+#
+# If this option is set, the set of compile flags, linked libraries and include directories
+# that is in use for all targets in the module is printed upon configuration.
+#
+# .. cmake_param:: APPEND
+# :option:
+#
+# If this option is set, the definitions, flags and directories specified in this function are
+# appended to the global collection of flags instead of being prepended. Only use it if you know
+# what you are doing.
+#
+# Adds all flags and all libraries to all executables that are subsequently added in the directory
+# from where this function is called and from all its subdirectories (recursively).
+# If used, this function *MUST* be called in the top level CMakeLists.txt BEFORE adding any subdirectories!
+# You can optionally add additional include dirs and compile definitions that will also be applied to
+# all targets in the module.
+#
+# .. note::
+# If you want to use :code:`dune_enable_all_packages` with an older version of CMake and your DUNE module
+# creates its own library, you have to manually create the library in the top-level CMakeLists.txt
+# file using :ref:`dune_add_library` (with all sources listed within that call), use
+# :ref:`dune_target_enable_all_packages` to add all packages to the library and finally list that library
+# under :code:`LIBRARIES` in the call to :ref:`dune_register_package_flags`. See dune-pdelab for an example of
+# how to do this correctly.
+#
+# While :ref:`dune_enable_all_packages` defines the user interface for this feature, developers might
+# also be interested in the following related functions:
+#
+# * :ref:`dune_target_enable_all_packages`
+# * :ref:`dune_register_package_flags`
+# * :ref:`dune_library_add_sources`
+#
+# .. cmake_function:: dune_target_enable_all_packages
+#
+# .. cmake_param:: TARGETS
+# :multi:
+#
+# A list of targets to add all flags etc. to.
+#
+# Adds all currently registered package flags (see :ref:`dune_register_package_flags`) to the given targets.
+# This function is mainly intended to help write DUNE modules that want to use :ref:`dune_enable_all_packages` and
+# define their own libraries, but need to be compatible with CMake < 3.1
+#
+# .. cmake_function:: dune_register_package_flags
+#
+# .. cmake_param:: INCLUDE_DIRS
+# :multi:
+#
+# The list of include directories needed by the external package.
+#
+# .. cmake_param:: COMPILE_DEFINITIONS
+# :multi:
+#
+# The list of compile definitions needed by the external package.
+#
+# .. cmake_param:: COMPILE_OPTIONS
+# :multi:
+#
+# The list of compile options needed by the external package.
+#
+# .. cmake_param:: LIBRARIES
+# :multi:
+#
+# The list of libraries that the external package should link to.
+# The order of the input is preserved in the output.
+#
+# .. cmake_param:: APPEND
+# :option:
+#
+# If this option is set, the definitions, flags and directories specified in this function are
+# appended to the global collection of flags instead of being prepended. Only use it if you know
+# what you are doing.
+#
+# To correctly implement the automatic handling of external libraries, the compile flags, include paths and link
+# flags of all found packages must be registered with this function. This function is only necessary for people that
+# want to write their own :code:`FindFooBar` CMake modules to link against additional libraries which are not supported by
+# the DUNE core modules. Call this function at the end of every find module. If you are using an external FindFoo
+# module which you cannot alter, call it after the call to :code:`find_package(foo)`.
+#
+# .. cmake_function:: dune_library_add_sources
+#
+# .. cmake_param:: module_library
+# :single:
+# :positional:
+#
+# The name of the module library target.
+#
+# .. cmake_param:: SOURCES
+# :multi:
+# :required:
+#
+# The source files to add to the DUNE module library :code:`module_library`.
+# That library must have been created by an earlier call to :ref:`dune_enable_all_packages`
+# in the current DUNE module.
+#
+# Register sources for module exported library.
+#
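+# A usage sketch for a top-level CMakeLists.txt (``dunefoo`` is a hypothetical
+# module library name)::
+#
+#    dune_enable_all_packages(MODULE_LIBRARIES dunefoo)
+#    add_subdirectory(dune)
+#    add_subdirectory(src)
+#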
+include_guard(GLOBAL)
+
+function(dune_register_package_flags)
+ include(CMakeParseArguments)
+ set(OPTIONS APPEND)
+ set(SINGLEARGS)
+ set(MULTIARGS COMPILE_DEFINITIONS COMPILE_OPTIONS INCLUDE_DIRS LIBRARIES)
+ cmake_parse_arguments(REGISTRY "${OPTIONS}" "${SINGLEARGS}" "${MULTIARGS}" ${ARGN})
+
+ if(REGISTRY_UNPARSED_ARGUMENTS)
+ message(WARNING "Unrecognized arguments for dune_register_package_flags!")
+ endif()
+
+ if(REGISTRY_APPEND)
+ set_property(GLOBAL APPEND PROPERTY ALL_PKG_INCS "${REGISTRY_INCLUDE_DIRS}")
+ set_property(GLOBAL APPEND PROPERTY ALL_PKG_LIBS "${REGISTRY_LIBRARIES}")
+ set_property(GLOBAL APPEND PROPERTY ALL_PKG_DEFS "${REGISTRY_COMPILE_DEFINITIONS}")
+ set_property(GLOBAL APPEND PROPERTY ALL_PKG_OPTS "${REGISTRY_COMPILE_OPTIONS}")
+ else(REGISTRY_APPEND)
+ get_property(all_incs GLOBAL PROPERTY ALL_PKG_INCS)
+ get_property(all_libs GLOBAL PROPERTY ALL_PKG_LIBS)
+ get_property(all_defs GLOBAL PROPERTY ALL_PKG_DEFS)
+ get_property(all_opts GLOBAL PROPERTY ALL_PKG_OPTS)
+ set_property(GLOBAL PROPERTY ALL_PKG_INCS "${REGISTRY_INCLUDE_DIRS}" "${all_incs}")
+ set_property(GLOBAL PROPERTY ALL_PKG_LIBS "${REGISTRY_LIBRARIES}" "${all_libs}")
+ set_property(GLOBAL PROPERTY ALL_PKG_DEFS "${REGISTRY_COMPILE_DEFINITIONS}" "${all_defs}")
+ set_property(GLOBAL PROPERTY ALL_PKG_OPTS "${REGISTRY_COMPILE_OPTIONS}" "${all_opts}")
+ endif(REGISTRY_APPEND)
+endfunction(dune_register_package_flags)
+
+
+function(dune_enable_all_packages)
+ include(CMakeParseArguments)
+ set(OPTIONS APPEND VERBOSE)
+ set(SINGLEARGS)
+ set(MULTIARGS COMPILE_DEFINITIONS COMPILE_OPTIONS INCLUDE_DIRS MODULE_LIBRARIES)
+ cmake_parse_arguments(DUNE_ENABLE_ALL_PACKAGES "${OPTIONS}" "${SINGLEARGS}" "${MULTIARGS}" ${ARGN})
+
+ if(DUNE_ENABLE_ALL_PACKAGES_UNPARSED_ARGUMENTS)
+ message(WARNING "Unrecognized arguments for dune_enable_all_packages!")
+ endif()
+
+ # handle additional include dirs specified in dune_enable_all_packages
+ if(DUNE_ENABLE_ALL_PACKAGES_INCLUDE_DIRS)
+ if(DUNE_ENABLE_ALL_PACKAGES_APPEND)
+ set_property(GLOBAL APPEND PROPERTY ALL_PKG_INCS "${DUNE_ENABLE_ALL_PACKAGES_INCLUDE_DIRS}")
+ else(DUNE_ENABLE_ALL_PACKAGES_APPEND)
+ get_property(all_incs GLOBAL PROPERTY ALL_PKG_INCS)
+ set_property(GLOBAL PROPERTY ALL_PKG_INCS "${DUNE_ENABLE_ALL_PACKAGES_INCLUDE_DIRS}" "${all_incs}")
+ endif(DUNE_ENABLE_ALL_PACKAGES_APPEND)
+ endif(DUNE_ENABLE_ALL_PACKAGES_INCLUDE_DIRS)
+
+ # add include dirs to all targets in module
+ get_property(all_incs GLOBAL PROPERTY ALL_PKG_INCS)
+ include_directories(${all_incs})
+ # verbose output of include dirs
+ if(DUNE_ENABLE_ALL_PACKAGES_VERBOSE)
+ message("Include directories for this project: ${all_incs}")
+ endif(DUNE_ENABLE_ALL_PACKAGES_VERBOSE)
+
+ # handle additional compile definitions specified in dune_enable_all_packages
+ if(DUNE_ENABLE_ALL_PACKAGES_COMPILE_DEFINITIONS)
+ if(DUNE_ENABLE_ALL_PACKAGES_APPEND)
+ set_property(GLOBAL APPEND PROPERTY ALL_PKG_DEFS "${DUNE_ENABLE_ALL_PACKAGES_COMPILE_DEFINITIONS}")
+ else(DUNE_ENABLE_ALL_PACKAGES_APPEND)
+ get_property(all_defs GLOBAL PROPERTY ALL_PKG_DEFS)
+ set_property(GLOBAL PROPERTY ALL_PKG_DEFS "${DUNE_ENABLE_ALL_PACKAGES_COMPILE_DEFINITIONS}" "${all_defs}")
+ endif(DUNE_ENABLE_ALL_PACKAGES_APPEND)
+ endif(DUNE_ENABLE_ALL_PACKAGES_COMPILE_DEFINITIONS)
+
+ # add compile definitions to all targets in module
+ get_property(all_defs GLOBAL PROPERTY ALL_PKG_DEFS)
+ # We have to do this in a loop because add_definitions() is kind of broken: even though it is supposed
+ # to be *the* function for adding compile definitions, it does not prepend "-D" (as opposed to
+ # target_compile_definitions(), which does). Well, whatever...
+ foreach(_definition ${all_defs})
+ if(_definition)
+ add_definitions("-D${_definition}")
+ endif()
+ endforeach()
+ # verbose output of compile definitions
+ if(DUNE_ENABLE_ALL_PACKAGES_VERBOSE)
+ message("Compile definitions for this project: ${all_defs}")
+ endif(DUNE_ENABLE_ALL_PACKAGES_VERBOSE)
+
+ # handle additional compile options specified in dune_enable_all_packages
+ if(DUNE_ENABLE_ALL_PACKAGES_COMPILE_OPTIONS)
+ if(DUNE_ENABLE_ALL_PACKAGES_APPEND)
+ set_property(GLOBAL APPEND PROPERTY ALL_PKG_OPTS "${DUNE_ENABLE_ALL_PACKAGES_COMPILE_OPTIONS}")
+ else(DUNE_ENABLE_ALL_PACKAGES_APPEND)
+ get_property(all_opts GLOBAL PROPERTY ALL_PKG_OPTS)
+ set_property(GLOBAL PROPERTY ALL_PKG_OPTS "${DUNE_ENABLE_ALL_PACKAGES_COMPILE_OPTIONS}" "${all_opts}")
+ endif(DUNE_ENABLE_ALL_PACKAGES_APPEND)
+ endif(DUNE_ENABLE_ALL_PACKAGES_COMPILE_OPTIONS)
+
+ # add compile options to all targets in module
+ get_property(all_opts GLOBAL PROPERTY ALL_PKG_OPTS)
+ add_compile_options(${all_opts})
+ # verbose output of compile options
+ if(DUNE_ENABLE_ALL_PACKAGES_VERBOSE)
+ message("Compile options for this project: ${all_opts}")
+ endif(DUNE_ENABLE_ALL_PACKAGES_VERBOSE)
+
+ # handle libraries
+ # this is a little tricky because the libraries defined within the current module require special
+ # handling to avoid tripping over CMake policies CMP0022 and CMP0038
+
+ # first add all libraries of upstream Dune modules and of external dependencies
+ get_property(all_libs GLOBAL PROPERTY ALL_PKG_LIBS)
+ link_libraries(${DUNE_LIBS} ${all_libs})
+
+ # now we have to do a little dance: Newer versions of CMake complain if a target links to itself,
+ # so we have to create all targets for libraries inside the module before adding them to the set
+ # of default libraries to link to. That works because calling link_libraries does not affect targets
+ # which already exist.
+ # Moreover, CMake generates a warning when creating a library without any source files, and the linker
+ # does the same if we add an empty dummy file. We work around that problem by autogenerating a library-specific
+ # stub source file with two functions ${lib_name}_version() and ${lib_name}_version_string() and add that
+ # as an initial source file.
+ # After creating the library with dune_add_library(), we add it to all future targets with a call to
+ # link_libraries(). The user can then add the real source files by calling dune_library_add_sources()
+ # throughout the module.
+
+ if(DUNE_ENABLE_ALL_PACKAGES_MODULE_LIBRARIES)
+
+ # make sure the /lib directory exists - we need it to create the stub source file in there
+ file(MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/lib")
+ # figure out the location of the stub source template
+ dune_module_path(MODULE dune-common RESULT script_dir SCRIPT_DIR)
+ foreach(module_lib ${DUNE_ENABLE_ALL_PACKAGES_MODULE_LIBRARIES})
+ # create the stub source file in the output directory (using a c++ compatible name)...
+ string(REGEX REPLACE "[^a-zA-Z0-9]" "_" module_lib_mangled ${module_lib})
+ configure_file("${script_dir}/module_library.cc.in" "${PROJECT_BINARY_DIR}/lib/lib${module_lib}_stub.cc")
+
+ # ...and create the library...
+ dune_add_library(${module_lib} SOURCES "${PROJECT_BINARY_DIR}/lib/lib${module_lib}_stub.cc")
+ # ...and add it to all future targets in the module
+ link_libraries(${module_lib})
+ endforeach(module_lib ${DUNE_ENABLE_ALL_PACKAGES_MODULE_LIBRARIES})
+
+ # export the DUNE_ENABLE_ALL_PACKAGES_MODULE_LIBRARIES variable to the parent scope
+ # this is required to make dune_library_add_sources() work (see further down)
+ set(
+ DUNE_ENABLE_ALL_PACKAGES_MODULE_LIBRARIES
+ ${DUNE_ENABLE_ALL_PACKAGES_MODULE_LIBRARIES}
+ PARENT_SCOPE
+ )
+ endif(DUNE_ENABLE_ALL_PACKAGES_MODULE_LIBRARIES)
+
+ if(DUNE_ENABLE_ALL_PACKAGES_VERBOSE)
+ get_property(all_libs GLOBAL PROPERTY ALL_PKG_LIBS)
+ message("Libraries for this project: ${all_libs}")
+ endif(DUNE_ENABLE_ALL_PACKAGES_VERBOSE)
+
+endfunction(dune_enable_all_packages)
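+
+# Illustrative sketch (not part of the original file): a module's top-level
+# CMakeLists.txt would typically call, with a hypothetical library name
+# "foolib",
+#
+#   dune_enable_all_packages(INCLUDE_DIRS ${PROJECT_SOURCE_DIR}/include
+#                            MODULE_LIBRARIES foolib)
+#
+# and subdirectories then add the real sources via
+#
+#   dune_library_add_sources(foolib SOURCES foo.cc)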
+
+
+function(dune_target_enable_all_packages)
+ foreach(_target ${ARGN})
+
+ get_property(all_incs GLOBAL PROPERTY ALL_PKG_INCS)
+ target_include_directories(${_target} PUBLIC ${all_incs})
+
+ get_property(all_defs GLOBAL PROPERTY ALL_PKG_DEFS)
+ target_compile_definitions(${_target} PUBLIC ${all_defs})
+
+ get_property(all_opts GLOBAL PROPERTY ALL_PKG_OPTS)
+ target_compile_options(${_target} PUBLIC ${all_opts})
+
+ get_property(all_libs GLOBAL PROPERTY ALL_PKG_LIBS)
+ target_link_libraries(${_target} PUBLIC ${DUNE_LIBS} ${all_libs})
+
+ endforeach()
+endfunction(dune_target_enable_all_packages)
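+
+# Example sketch (hypothetical target name): for a target that should not
+# pick up the global include_directories()/link_libraries() settings, one
+# would call
+#
+#   add_executable(mytool mytool.cc)
+#   dune_target_enable_all_packages(mytool)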
+
+
+function(dune_library_add_sources lib)
+
+ if (NOT (DEFINED DUNE_ENABLE_ALL_PACKAGES_MODULE_LIBRARIES))
+ message(FATAL_ERROR "You must call dune_enable_all_packages with the MODULE_LIBRARIES option before calling dune_library_add_sources")
+ endif()
+
+ # This looks weird, but seems to be the most practical way to check for list membership,
+ # as list(FIND ...) does not work reliably in a macro...
+ if (NOT (";${DUNE_ENABLE_ALL_PACKAGES_MODULE_LIBRARIES};" MATCHES ";${lib};"))
+ message(FATAL_ERROR
+"Attempt to add sources to library ${lib}, which has not been defined in dune_enable_all_packages.
+List of libraries defined in dune_enable_all_packages: ${DUNE_ENABLE_ALL_PACKAGES_MODULE_LIBRARIES}")
+ endif()
+
+ include(CMakeParseArguments)
+ cmake_parse_arguments(DUNE_LIBRARY_ADD_SOURCES "" "" "SOURCES" ${ARGN})
+
+ if(DUNE_LIBRARY_ADD_SOURCES_UNPARSED_ARGUMENTS)
+ message(WARNING "Unrecognized arguments for dune_library_add_sources!")
+ endif()
+
+ foreach(source ${DUNE_LIBRARY_ADD_SOURCES_SOURCES})
+ if(IS_ABSOLUTE ${source})
+ target_sources(${lib} PRIVATE ${source})
+ else()
+ target_sources(${lib} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/${source})
+ endif()
+ endforeach()
+endfunction()
--- /dev/null
+# An error checking wrapper around the cmake command execute_process
+#
+# .. cmake_command:: dune_execute_process
+#
+# .. cmake_param:: ERROR_MESSAGE
+# :single:
+#
+# Error message to show if command exited with non-zero exit code.
+# This also implies aborting the current cmake run with a fatal error.
+# Note that if this is omitted, no return code checking is done.
+#
+# A thin wrapper around the cmake command :code:`execute_process`, that
+# exits on non-zero exit codes. All arguments are forwarded to the actual
+# cmake command.
+#
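+# .. rubric:: Example
+#
+# A minimal sketch (command and message are placeholders, not taken from the
+# original documentation)::
+#
+#   dune_execute_process(
+#     COMMAND "${CMAKE_COMMAND}" -E echo "hello"
+#     OUTPUT_VARIABLE greeting
+#     ERROR_MESSAGE "running the echo command failed"
+#     )
+#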
+include_guard(GLOBAL)
+
+function(dune_execute_process)
+ include(CMakeParseArguments)
+ cmake_parse_arguments(EXECUTE "" "ERROR_MESSAGE;RESULT_VARIABLE;OUTPUT_VARIABLE;ERROR_VARIABLE" "" ${ARGN})
+
+ # Decide whether stdout and stderr have to be split
+ if(EXECUTE_OUTPUT_VARIABLE AND EXECUTE_ERROR_VARIABLE)
+ set(SPLIT_ERROR TRUE)
+ set(ERRLOGVAR errlog)
+ else()
+ set(SPLIT_ERROR FALSE)
+ set(ERRLOGVAR log)
+ endif()
+
+ # Call the original cmake function
+ execute_process(${EXECUTE_UNPARSED_ARGUMENTS}
+ RESULT_VARIABLE retcode
+ OUTPUT_VARIABLE log
+ ERROR_VARIABLE ${ERRLOGVAR}
+ )
+
+ # Issue an error if requested!
+ if(EXECUTE_ERROR_MESSAGE)
+ if(NOT "${retcode}" STREQUAL "0")
+ cmake_parse_arguments(ERR "" "" "COMMAND" ${EXECUTE_UNPARSED_ARGUMENTS})
+ if(SPLIT_ERROR)
+ set(log "stdout:\n${log}\n\nstderr:\b${errlog}")
+ endif()
+ message(FATAL_ERROR "${EXECUTE_ERROR_MESSAGE}\nRun command:${ERR_COMMAND}\nReturn code: ${retcode}\nDetailed log:\n${log}")
+ endif()
+ endif()
+
+ # Propagate logs back to the calling scope
+ if(EXECUTE_RESULT_VARIABLE)
+ set(${EXECUTE_RESULT_VARIABLE} ${retcode} PARENT_SCOPE)
+ endif()
+ if(EXECUTE_OUTPUT_VARIABLE)
+ set(${EXECUTE_OUTPUT_VARIABLE} ${log} PARENT_SCOPE)
+ endif()
+ if(EXECUTE_ERROR_VARIABLE)
+ set(${EXECUTE_ERROR_VARIABLE} ${${ERRLOGVAR}} PARENT_SCOPE)
+ endif()
+endfunction()
--- /dev/null
+# Module to generate instantiations, typically for some template
+#
+# .. cmake_module::
+#
+# This module can be used to generate explicit template instantiations.
+# Suppose you have a template test function that you want to call for a
+# number of template arguments. You want to explicitly instantiate the
+# function for each set of template arguments, and put the instanciation
+# into its own translation unit. (This can be beneficial in that it limits
+# the amount of code that the optimizer sees at once, and thus it can
+# reduce both memory and cpu requirements during compilation.)
+#
+# .. rubric:: Examples
+#
+# Let's say you are writing a test ``mytest.cc`` and need to call a
+# template function for several types::
+#
+# #include <mytestsuite.hh>
+#
+# int main() {
+# MyTestSuite suite;
+#
+# suite.test<bool>();
+# suite.test<char>();
+# suite.test<int>();
+# suite.test<double>();
+#
+# return suite.good() ? EXIT_SUCCESS : EXIT_FAILURE;
+# }
+#
+# Let's further say that you want to explicitly instantiate each used
+# ``MyTestSuite::test()`` instance in its own translation unit. Then you
+# need a series of files ``mytest_instance_bool.cc``,
+# ``mytest_instance_char.cc``, etc, all with essentially the same content::
+#
+# #include <mytestsuite.hh>
+#
+# template void MyTestSuite::test<@TYPE@>();
+#
+# where ``@TYPE@`` is replaced by ``bool``, ``char``, etc as appropriate.
+#
+# This is however not enough: all translation units need to know which
+# instances of ``MyTestSuite::test()`` are instantiated explicitly so they
+# do not instantiate them implicitly themselves (that would violate the
+# one-definition rule). C++ only allows declaring individual instances as
+# extern, not all of them collectively, so we need to put a list of all
+# these instances into a header ``mytest.hh``::
+#
+# #include <mytestsuite.hh>
+#
+# extern template void MyTestSuite::test<bool>();
+# extern template void MyTestSuite::test<char>();
+# extern template void MyTestSuite::test<int>();
+# extern template void MyTestSuite::test<double>();
+#
+# We also need to include that header from each translation unit in the
+# test, we can simply replace ``#include <mytestsuite.hh>`` with ``#include
+# <mytest.hh>``.
+#
+# This is of course tedious and prone to break if the list of tested types
+# changes. To make this less fragile this module provides a series of
+# commands: :ref:`dune_instance_begin() <dune_instance_begin>`,
+# :ref:`dune_instance_add() <dune_instance_add>`, and
+# :ref:`dune_instance_end() <dune_instance_end>`, which can be used to
+# automatically generate the explicit instantiations for each type and the
+# contents for the header and the body of main.
+#
+# This may look like this in ``CMakeLists.txt``::
+#
+# dune_instance_begin(FILES mytest.cc mytest.hh)
+#
+# foreach(TYPE IN ITEMS bool char int double)
+# dune_instance_add(ID "${TYPE}" FILES mytest_instance.cc)
+# endforeach(TYPE IN ITEMS bool char int double)
+#
+# dune_instance_end()
+#
+# list(FILTER DUNE_INSTANCE_GENERATED INCLUDE REGEX [[\.cc$]])
+# dune_add_test(NAME mytest
+# SOURCES ${DUNE_INSTANCE_GENERATED})
+#
+# The call to :ref:`dune_instance_begin() <dune_instance_begin>` reads
+# ``mytest.cc.in`` and ``mytest.hh.in`` and splits them into embedded
+# templates and other content. It will replace occurrences of ``@VAR@``
+# now in the other content and save the result for later.
+#
+# The call to :ref:`dune_instance_add() <dune_instance_add>` occurs in a
+# loop. Each call will instantiate the embedded templates extracted
+# earlier, replacing occurrences of ``@TYPE@`` by the value of the variable
+# ``TYPE`` set in the for loop. Then files containing explicit
+# instantiations will be generated as ``mytest_instance_bool.cc``,
+# ``mytest_instance_char.cc``, etc., from a template file
+# ``mytest_instance.cc.in``. The names of the generated files are the base
+# file name from the template definition with the ``ID`` inserted before
+# the extension. The name of the template file is the same base file name
+# with ``.in`` appended.
+#
+# :ref:`dune_instance_end() <dune_instance_end>` is used to write
+# ``mytest.cc`` and ``mytest.hh`` with the collected content from the
+# embedded templates. The list of generated files will be available in the
+# variable ``DUNE_INSTANCE_GENERATED``.
+#
+# The template files then look like this:
+#
+# ``mytest.cc.in``::
+#
+# // @GENERATED_SOURCE@
+#
+# #include <config.h>
+#
+# #include <mytest.hh>
+#
+# int main() {
+# MyTestSuite suite;
+#
+# #cmake @template@
+# suite.test<@TYPE@>();
+# #cmake @endtemplate@
+#
+# return suite.good() ? EXIT_SUCCESS : EXIT_FAILURE;
+# }
+#
+# ``mytest.hh.in``::
+#
+# // @GENERATED_SOURCE@
+#
+# #include <mytestsuite.hh>
+#
+# #cmake @template@
+# extern template void MyTestSuite::test<@TYPE@>();
+# #cmake @endtemplate@
+#
+# ``mytest_instance.cc.in``::
+#
+# // @GENERATED_SOURCE@
+#
+# #include <config.h>
+#
+# #include <mytest.hh>
+#
+# template void MyTestSuite::test<@TYPE@>();
+#
+# The ``@GENERATED_SOURCE@`` substitution is good practice, it tells a
+# human reader that this file was generated and what the template file was,
+# and it hints editors to go into read-only mode.
+#
+# .. rubric:: Embedded Templates
+#
+# The template files given in :ref:`dune_instance_begin()
+# <dune_instance_begin>` can contain embedded templates. These will be
+# instantiated by :ref:`dune_instance_add() <dune_instance_add>`, and all
+# instantiations will be concatenated together and replace the original
+# embedded template.
+#
+# The beginning of an embedded template is marked by a line containing
+# ``@template@`` or ``@template NAME@``. Leaving off the name is
+# equivalent to an empty name. ``dune_instance_add(TEMPLATE NAME)`` will
+# only instantiate embedded templates whose name matches and ignore all
+# others.
+#
+# The end of an embedded template is marked by a line containing
+# ``@endtemplate@`` or ``@endtemplate NAME@``. If a name is given, it must
+# match the name of the embedded template it closes. If no name is given
+# (or the name is empty), that check is omitted.
+#
+# There may be arbitrary characters on the same line before or after the
+# begin and end markers. These are ignored, so you can use them for
+# comments or to trick your editor into proper indentation. The one
+# exception is that the line surrounding the marker may not contain any
+# ``@`` characters to avoid ambiguities.
+#
+# .. rubric:: How Files And Strings Are Generated
+#
+# The generation is done using the cmake command ``configure_file(...)``
+# for template files and ``string(CONFIGURE ...)`` for template strings.
+# These simply substitute the current variable values, so make sure to set
+# up the variables to substitute before calling :ref:`dune_instance_add()
+# <dune_instance_add>` or :ref:`dune_instance_begin()
+# <dune_instance_begin>`.
+#
+# Refrain from using substitutions that begin with an underscore
+# (e.g. ``@_my_local_var@``). The generation functions in this module use
+# such names for their local variables and may hide the variable you are
+# trying to substitute.
+#
+# When instantiating files we set up a few convenience variables before
+# calling ``configure_file()`` that can be used in substitutions:
+# ``@TEMPLATE@`` contains the name of the template file. ``@INSTANCE@``
+# contains the name of the file being generated, not including an implied
+# ``${CMAKE_CURRENT_BINARY_DIR}``. Use ``@BINDIR_INSTANCE@`` if you do
+# want the implied ``${CMAKE_CURRENT_BINARY_DIR}``. ``@GENERATED_SOURCE@``
+# contains a one-line message that this file was generated, including the
+# name of the template file.
+#
+# .. rubric:: Main Interface
+#
+# These are the ones you normally use.
+#
+# * :ref:`dune_instance_begin() <dune_instance_begin>`
+# * :ref:`dune_instance_add() <dune_instance_add>`
+# * :ref:`dune_instance_end() <dune_instance_end>`
+# * :ref:`DUNE_INSTANCE_GENERATED <DUNE_INSTANCE_GENERATED>`
+#
+# .. rubric:: Utilities
+#
+# You would not use these directly under normal circumstances.
+#
+# * :ref:`dune_instance_parse_file_spec() <dune_instance_parse_file_spec>`
+# * :ref:`dune_instance_from_id() <dune_instance_from_id>`
+# * :ref:`dune_instance_generate_file() <dune_instance_generate_file>`
+#
+#
+# .. cmake_function:: dune_instance_begin
+#
+# .. cmake_brief::
+#
+# Prepare for a list of instances.
+#
+# .. cmake_param:: FILES
+# :multi:
+# :argname: file_spec
+#
+# List of template files with embedded templates.
+#
+# Read the given template files, and extract embedded templates. Run the
+# generator on the remaining file content with the variables currently in
+# effect.
+#
+# .. note::
+#
+# A matching :ref:`dune_instance_end() <dune_instance_end>` is required.
+# Since information is communicated through variables in the callers
+# scope, :ref:`dune_instance_begin()
+# <dune_instance_begin>`/:ref:`dune_instance_end() <dune_instance_end>`
+# blocks may not be nested inside the same scope. Since a function is a
+# new scope, it may safely contain a :ref:`dune_instance_begin()
+# <dune_instance_begin>`/:ref:`dune_instance_end() <dune_instance_end>`
+# block, even if it is itself called from one.
+#
+#
+# .. cmake_function:: dune_instance_add
+#
+# .. cmake_brief::
+#
+# Instantiate a template with the currently set variable values.
+#
+# .. cmake_param:: FILES
+# :multi:
+# :argname: file_spec
+#
+# List of template file specifications. These are usually the names of
+# template files with the ``.in`` extension removed. See the ID
+# parameter for details.
+#
+# .. cmake_param:: ID
+# :single:
+#
+# Used to build the names of generated files. Each file specification
+# together with this id is given to :ref:`dune_instance_from_id()
+# <dune_instance_from_id>` to determine the name of a template file and
+# the name of an instance file. To get unique instance file names this
+# ID should usually be a list of variable values joined together by
+# ``_``.
+#
+# Specifically, each file specification may be of the form
+# ``template_file_name:base_instance_file_name``, or it may be a single
+# token not containing ``:``. In the latter case, if that token
+# contains a trailing ``.in``, that is removed and the result is the base
+# instance file name. The base instance file name has the ``.in``
+# appended again to form the template file name.
+#
+# The template file name is used as-is to generate files from.
+#
+# The ID is mangled by replacing any runs of non-alphanumeric characters
+# with an underscore ``_``, and stripping any resulting underscore from
+# the beginning and the end. The result is inserted before any
+# extension into the base instance file name to form the instance file
+# name.
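+#
+# For example (a hypothetical ID), ``dune_instance_add(ID "std::complex<double>"
+# FILES mytest_instance.cc)`` would generate the instance file
+# ``mytest_instance_std_complex_double.cc`` from ``mytest_instance.cc.in``.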
+#
+# .. cmake_param:: TEMPLATE
+# :single:
+#
+# Instantiate embedded templates by this name. Defaults to an empty
+# name, matching embedded templates without name.
+#
+# Instantiate any embedded templates that match the given template name,
+# substituting the current variables values. Then, generate files
+# according to the file specifications in the template, doing
+# substitutions as well.
+#
+#
+# .. cmake_function:: dune_instance_end
+#
+# .. cmake_brief::
+#
+# Close a block started by :ref:`dune_instance_begin()
+# <dune_instance_begin>`, and write the files generated from the
+# templates given there.
+#
+# Write the files generated from the template files given in
+# :ref:`dune_instance_begin() <dune_instance_begin>`, including any content
+# generated from embedded templates in :ref:`dune_instance_add()
+# <dune_instance_add>`.
+#
+#
+# .. cmake_function:: dune_instance_parse_file_spec
+#
+# .. cmake_brief::
+#
+# Parse a file specification into a template file name and an instance
+# file name.
+#
+# .. cmake_param:: spec
+# :positional:
+# :single:
+# :required:
+#
+# The file specification.
+#
+# .. cmake_param:: template_var
+# :positional:
+# :single:
+#
+# Name of the variable to store the template file name in. Can be empty
+# to discard the template file name.
+#
+# .. cmake_param:: instance_var
+# :positional:
+# :single:
+#
+# Name of the variable to store the instance file name in. Can be empty
+# to discard the instance file name.
+#
+# The file specification can be the name of a template file if it has
+# ``.in`` at the end, or the name of an instance file if it doesn't. The
+# name of the other file is obtained by appending or removing ``.in``, as
+# applicable. Both file names can also be given explicitly in the form
+# ``template_file_name:instance_file_name``.
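+#
+# For example, the file specification ``mytest.cc.in`` yields the template
+# file ``mytest.cc.in`` and the instance file ``mytest.cc``; the
+# specification ``mytest.cc`` yields the same pair.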
+#
+# .. note::
+#
+# This is the function used to parse the file specifications in
+# :ref:`dune_instance_begin() <dune_instance_begin>`. It is also used
+# as a helper in :ref:`dune_instance_from_id() <dune_instance_from_id>`
+# to determine template file name and base instance file name.
+#
+#
+# .. cmake_function:: dune_instance_from_id
+#
+# .. cmake_brief::
+#
+# Determine a template file name and an instance file name from a file
+# specification and a unique id.
+#
+# .. cmake_param:: file_spec
+# :positional:
+# :single:
+# :required:
+#
+# The file specification.
+#
+# .. cmake_param:: id
+# :positional:
+# :single:
+# :required:
+#
+# The id specification. This should uniquely identify an instance.
+#
+# .. cmake_param:: template_var
+# :positional:
+# :single:
+#
+# Name of the variable to store the template file name in. Can be empty
+# to discard the template file name.
+#
+# .. cmake_param:: instance_var
+# :positional:
+# :single:
+#
+# Name of the variable to store the instance file name in. Can be empty
+# to discard the instance file name.
+#
+# The file specification is handed to :ref:`dune_instance_parse_file_spec()
+# <dune_instance_parse_file_spec>` to determine a template file name and a
+# *base* instance file name.
+#
+# The ID is mangled by replacing any runs of non-alphanumeric characters
+# with an underscore ``_``, and stripping any resulting underscore from the
+# beginning and the end. The result is inserted before any extension into
+# the base instance file name to form the instance file name.
+#
+# .. note::
+#
+# This is the function used to parse the file specifications given in
+# :ref:`dune_instance_add(FILES ...) <dune_instance_add>`.
+#
+#
+# .. cmake_function:: dune_instance_apply_bindir
+#
+# .. cmake_brief::
+#
+# Modify a filename to be relative to ``CMAKE_CURRENT_BINARY_DIR``.
+#
+# .. cmake_param:: fname_var
+# :positional:
+# :single:
+# :required:
+#
+# The name of the variable containing the file name.
+#
+# This is used to mimic the behaviour of ``configure_file()``. If the file
+# name given is not absolute, it is modified by prepending
+# ``${CMAKE_CURRENT_BINARY_DIR}``.
+#
+#
+# .. cmake_function:: dune_instance_generate_file
+#
+# .. cmake_brief::
+#
+# Convenience replacement for ``configure_file()``: enable standard
+# substitutions, register files as generated, and flag the same file
+# being generated twice.
+#
+# .. cmake_param:: TEMPLATE
+# :positional:
+# :single:
+# :required:
+#
+# The name of the template file.
+#
+# .. cmake_param:: INSTANCE
+# :positional:
+# :single:
+# :required:
+#
+# The name of the generated file. This is assumed relative to
+# ``${CMAKE_CURRENT_BINARY_DIR}``.
+#
+# Make sure the variables ``TEMPLATE``, ``INSTANCE``, and
+# ``BINDIR_INSTANCE`` are set to the parameter values and available for
+# substitution. Also set the variable ``GENERATED_SOURCE`` to a one-line
+# message that tells a human reader that this file is generated, and the
+# name of the template file it was generated from. The message also
+# includes hints for common editors telling them to switch to read-only
+# mode.
+#
+# Then generate the file as if by ``configure_file()``.
+#
+# If the instance file has been registered as a generated source file
+# before, this function generates a fatal error. This ensures that any
+# accidental attempt to generate the same file twice is caught. As a
+# special exception, if the generated content is the same as before, the
+# error is silently skipped.
+#
+#
+# .. cmake_variable:: DUNE_INSTANCE_GENERATED
+#
+# After :ref:`dune_instance_end() <dune_instance_end>`, this holds the list
+# of files that were generated. The list entries include an implied
+# ``${CMAKE_CURRENT_BINARY_DIR}``, as appropriate.
+#
+# Do not rely on the value of this variable and do not modify it inside a
+# :ref:`dune_instance_begin()
+# <dune_instance_begin>`/:ref:`dune_instance_end() <dune_instance_end>`
+# block.
+include_guard(GLOBAL)
+
+# macro to print additional information to the cmake output file.
+# Note: in cmake 3.15 this is available through the message(VERBOSE "...") function.
+macro(message_verbose TEXT)
+ if(CMAKE_VERSION VERSION_GREATER_EQUAL "3.15")
+ message(VERBOSE "${TEXT}")
+ else()
+ file(APPEND ${PROJECT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log "${TEXT}\n")
+ endif()
+endmacro(message_verbose)
+
+
+######################################################################
+#
+# Coping with cmake list shortcomings
+#
+
+# We use these commands internally to quote text before adding it to lists as
+# an element, and to unquote elements again after extracting them. The quoted
+# text is
+# - free of ';' characters. This avoids problems when using list(APPEND),
+# which does not quote ';' characters inside the appended element. It would
+# also avoid problems with list(INSERT), which mangles any cmake quoting in
+# the list it inserts to, but we don't actually use that command.
+# - free of '\' characters. This avoids problems with a list element that
+# ends in a '\' merging with the next element, because the '\' quotes the
+# ';' that is used to separate the elements
+# - non-empty. This avoids the problem that cmake can't distinguish between
+# an empty list and a list with one empty element.
+function(dune_instance_quote_element VAR)
+ set(content "${${VAR}}")
+ string(REPLACE [[$]] [[$s]] content "${content}")
+ string(REPLACE [[;]] [[$:]] content "${content}")
+ string(REPLACE [[\]] [[$/]] content "${content}")
+ if(content STREQUAL "")
+ set(content [[$@]])
+ endif()
+ set("${VAR}" "${content}" PARENT_SCOPE)
+endfunction(dune_instance_quote_element)
+
+function(dune_instance_unquote_element VAR)
+ set(content "${${VAR}}")
+ string(REPLACE [[$@]] [[]] content "${content}")
+ string(REPLACE [[$/]] [[\]] content "${content}")
+ string(REPLACE [[$:]] [[;]] content "${content}")
+ string(REPLACE [[$s]] [[$]] content "${content}")
+ set("${VAR}" "${content}" PARENT_SCOPE)
+endfunction(dune_instance_unquote_element)
+
+######################################################################
+#
+# instance name and template name manipulation
+#
+
+function(dune_instance_parse_file_spec spec template_var instance_var)
+ string(REPLACE ":" ";" spec_items "${spec}")
+ list(LENGTH spec_items len)
+
+ # check arguments
+ if(len GREATER 2)
+ message(FATAL_ERROR "too many items in file specification: ${spec}")
+ endif(len GREATER 2)
+ if(len EQUAL 0)
+ message(FATAL_ERROR "empty file specification")
+ endif(len EQUAL 0)
+
+ # use as-is
+ if(len EQUAL 2)
+ list(GET spec_items 0 template)
+ list(GET spec_items 1 instance)
+ endif(len EQUAL 2)
+
+ # deduce
+ if(len EQUAL 1)
+ string(REGEX REPLACE ".in\\$" "" instance "${spec}")
+ set(template "${instance}.in")
+ endif(len EQUAL 1)
+
+ #export
+ if(NOT ("${template_var}" STREQUAL ""))
+ set("${template_var}" "${template}" PARENT_SCOPE)
+ endif(NOT ("${template_var}" STREQUAL ""))
+ if(NOT ("${instance_var}" STREQUAL ""))
+ set("${instance_var}" "${instance}" PARENT_SCOPE)
+ endif(NOT ("${instance_var}" STREQUAL ""))
+endfunction(dune_instance_parse_file_spec)
+
+# build output file name: parse the file_spec into a template name and a base
+# instance name. Mangle the ID by replacing anything special with "_" and
+# intersperse the result between basename and extension of the base instance
+# name. Use the result as the instance name.
+function(dune_instance_from_id file_spec id template_var instance_var)
+ dune_instance_parse_file_spec("${file_spec}" template base)
+
+ # split into prefix and suffix
+ if(base MATCHES "\\.")
+ string(REGEX REPLACE "\\.[^.]*\$" "" prefix "${base}")
+ string(REGEX REPLACE "^.*(\\.[^.]*)\$" "\\1" suffix "${base}")
+ else(base MATCHES "\\.")
+ set(prefix "${base}")
+ set(suffix)
+ endif(base MATCHES "\\.")
+
+ # mangle the id
+ string(REGEX REPLACE "[^a-zA-Z0-9]+" "_" mangled_id "${id}")
+ string(REGEX REPLACE "^_+" "" mangled_id "${mangled_id}")
+ string(REGEX REPLACE "_+\$" "" mangled_id "${mangled_id}")
+ if(mangled_id STREQUAL "")
+ message(FATAL_ERROR "\"${id}\" is empty after mangling")
+ endif(mangled_id STREQUAL "")
+
+ #export
+ if(NOT ("${template_var}" STREQUAL ""))
+ set("${template_var}" "${template}" PARENT_SCOPE)
+ endif(NOT ("${template_var}" STREQUAL ""))
+ if(NOT ("${instance_var}" STREQUAL ""))
+ set("${instance_var}" "${prefix}_${mangled_id}${suffix}" PARENT_SCOPE)
+ endif(NOT ("${instance_var}" STREQUAL ""))
+endfunction(dune_instance_from_id)
+
+# mimic the behaviour of configure_file(), placing relative paths in the
+# current binary dir
+function(dune_instance_apply_bindir fname_var)
+ if(NOT (IS_ABSOLUTE ${fname_var}))
+ set(${fname_var} "${CMAKE_CURRENT_BINARY_DIR}/${${fname_var}}" PARENT_SCOPE)
+ endif()
+endfunction(dune_instance_apply_bindir)
+
+
+######################################################################
+#
+# File generation
+#
+
+function(dune_instance_set_generated)
+ # prepare instance substitution variables
+ set(GENERATED_SOURCE
+ "generated from ${TEMPLATE} by cmake -*- buffer-read-only:t -*- vim: set readonly:"
+ PARENT_SCOPE)
+
+ set(BINDIR_INSTANCE "${INSTANCE}")
+ dune_instance_apply_bindir(BINDIR_INSTANCE)
+ set(BINDIR_INSTANCE "${BINDIR_INSTANCE}" PARENT_SCOPE)
+endfunction(dune_instance_set_generated)
+
+# Read a template file and split it into three lists
+# - content_parts contains the parts before, between, and after templates
+# - template_parts contains the content of each template
+# - template_names contains the names of each template
+# The elements in the lists are quoted using dune_instance_quote_element() to
+# protect against problems with empty elements and against cmake's list()
+# command butchering its own quoting.
+function(dune_instance_parse_embedded name content_parts template_parts template_names)
+ message_verbose("Parsing ${name} for embedded templates")
+ file(READ "${name}" content)
+ # ensure that the file content ends in a newline, which makes searching for
+ # template marker easier
+ string(APPEND content "\n")
+
+ set(content_list "")
+ set(template_list "")
+ set(template_name_list "")
+ set(acc "")
+ set(lineno 0)
+ set(in_template FALSE)
+ while(NOT (content STREQUAL ""))
+ string(FIND "${content}" "\n" nextline)
+ math(EXPR nextline "${nextline} + 1")
+
+ string(SUBSTRING "${content}" 0 "${nextline}" line)
+ string(SUBSTRING "${content}" "${nextline}" -1 content)
+ math(EXPR lineno "${lineno} + 1")
+
+ if(line MATCHES "(.*)(@((end)?template)([ \t]+([-+._/0-9a-zA-Z]*))?@)(.*)")
+ set(prefix "${CMAKE_MATCH_1}")
+ set(sep "${CMAKE_MATCH_2}")
+ set(sep_keyword "${CMAKE_MATCH_3}")
+ set(sep_name "${CMAKE_MATCH_6}")
+ set(sep_suffix "${CMAKE_MATCH_7}")
+
+ if(in_template)
+ if(NOT (sep_keyword STREQUAL "endtemplate"))
+ message(FATAL_ERROR "\
+${name}:${lineno}: '${sep}' nested inside...
+${name}:${template_lineno}: ...'${template_sep}' here")
+ endif()
+
+ if(NOT ((sep_name STREQUAL "") OR (sep_name STREQUAL template_name)))
+ message(FATAL_ERROR "\
+${name}:${template_lineno}: '${template_sep}' closed by nonmatching...
+${name}:${lineno}: ...'${sep}' here")
+ endif()
+
+ dune_instance_quote_element(acc)
+ list(APPEND template_list "${acc}")
+ dune_instance_quote_element(template_name)
+ list(APPEND template_name_list "${template_name}")
+
+ set(in_template FALSE)
+ else()
+ if(NOT (sep_keyword STREQUAL "template"))
+ message(FATAL_ERROR "${name}:${lineno}: Lone '${sep}'")
+ endif()
+
+ dune_instance_quote_element(acc)
+ list(APPEND content_list "${acc}")
+ set(template_sep "${sep}")
+ set(template_name "${sep_name}")
+ set(template_lineno "${lineno}")
+
+ set(in_template TRUE)
+ endif()
+ set(acc "")
+ else() # line did not match seperator
+ string(APPEND acc "${line}")
+ endif()
+ endwhile()
+
+ if(in_template)
+ message(FATAL_ERROR "${name}:${template_lineno}: Unclosed '${template_sep}'")
+ endif()
+
+ dune_instance_quote_element(acc)
+ list(APPEND content_list "${acc}")
+
+ set("${content_parts}" "${content_list}" PARENT_SCOPE)
+ set("${template_parts}" "${template_list}" PARENT_SCOPE)
+ set("${template_names}" "${template_name_list}" PARENT_SCOPE)
+endfunction(dune_instance_parse_embedded)
+
+# Take the name of a list variable containing content parts other than
+# embedded templates and instantiate each part. Put the result back into the
+# same variable. List elements are quoted.
+function(dune_instance_generate_parts _parts_list)
+ set(_acc "")
+ foreach(_part IN LISTS "${_parts_list}")
+ dune_instance_unquote_element(_part)
+ string(CONFIGURE "${_part}" _part)
+ dune_instance_quote_element(_part)
+ list(APPEND _acc "${_part}")
+ endforeach(_part)
+ set("${_parts_list}" "${_acc}" PARENT_SCOPE)
+endfunction(dune_instance_generate_parts)
+
+function(dune_instance_generate_file TEMPLATE INSTANCE)
+ if(("${INSTANCE}" STREQUAL "") OR ("${TEMPLATE}" STREQUAL ""))
+ message(FATAL_ERROR "need both INSTANCE and TEMPLATE")
+ endif(("${INSTANCE}" STREQUAL "") OR ("${TEMPLATE}" STREQUAL ""))
+
+ # prepare instance substitution variables
+ dune_instance_set_generated()
+
+ # do the generation
+ message_verbose("Generating ${TEMPLATE} -> ${INSTANCE}")
+ file(READ "${TEMPLATE}" _content)
+ string(CONFIGURE "${_content}" _content)
+
+ # make sure we did not generate this file before
+ get_property(_seen SOURCE "${BINDIR_INSTANCE}" PROPERTY DUNE_INSTANCE_GENERATED)
+ if(_seen)
+ file(READ "${BINDIR_INSTANCE}" _oldcontent)
+ if(NOT (_content STREQUAL _oldcontent))
+ message(FATAL_ERROR "Attempt to generate ${INSTANCE} (from ${TEMPLATE}), "
+ "which has already been generated with different content")
+ endif()
+ # otherwise, the content matches, so nothing to do
+ else(_seen)
+ # _seen was false, but the file may still be around from a previous cmake
+ # run, only write if changed to avoid recompilations
+ dune_write_changed_file("${BINDIR_INSTANCE}" "${_content}")
+ set_property(SOURCE "${BINDIR_INSTANCE}" PROPERTY DUNE_INSTANCE_GENERATED TRUE)
+ set_property(DIRECTORY APPEND
+ PROPERTY CMAKE_CONFIGURE_DEPENDS "${TEMPLATE}")
+ endif(_seen)
+endfunction(dune_instance_generate_file)
+
+# only write if the content changes, avoiding recompilations
+function(dune_write_changed_file name content)
+ if(EXISTS "${name}")
+ file(READ "${name}" oldcontent)
+ if(content STREQUAL oldcontent)
+ return()
+ endif()
+ endif()
+ file(WRITE "${name}" "${content}")
+endfunction(dune_write_changed_file)
+
+######################################################################
+#
+# High-level interface commands
+#
+
+function(dune_instance_begin)
+ cmake_parse_arguments(_arg
+ "" # options
+ "" # one_value_keywords
+ "FILES" # multi_value_keywords
+ ${ARGV}
+ )
+ if(DEFINED _arg_UNPARSED_ARGUMENTS)
+ message(FATAL_ERROR "unrecognized arguments: ${_arg_UNPARSED_ARGUMENTS}")
+ endif(DEFINED _arg_UNPARSED_ARGUMENTS)
+
+ set(_all_content_parts "")
+ set(_all_template_parts "")
+ set(_all_template_names "")
+ foreach(_spec IN LISTS _arg_FILES)
+ dune_instance_parse_file_spec("${_spec}" TEMPLATE INSTANCE)
+ dune_instance_set_generated()
+ # reconfigure if the input template file changes
+ set_property(DIRECTORY APPEND
+ PROPERTY CMAKE_CONFIGURE_DEPENDS "${TEMPLATE}")
+ dune_instance_parse_embedded("${TEMPLATE}"
+ _file_content_parts _file_template_parts _file_template_names)
+ dune_instance_generate_parts(_file_content_parts)
+
+ dune_instance_quote_element(_file_content_parts)
+ list(APPEND _all_content_parts "${_file_content_parts}")
+
+ dune_instance_quote_element(_file_template_parts)
+ list(APPEND _all_template_parts "${_file_template_parts}")
+
+ dune_instance_quote_element(_file_template_names)
+ list(APPEND _all_template_names "${_file_template_names}")
+ endforeach(_spec)
+ set(_DUNE_INSTANCE_GLOBAL_FILES "${_arg_FILES}" PARENT_SCOPE)
+ set(_DUNE_INSTANCE_CONTENT_PARTS "${_all_content_parts}" PARENT_SCOPE)
+ set(_DUNE_INSTANCE_TEMPLATE_PARTS "${_all_template_parts}" PARENT_SCOPE)
+ set(_DUNE_INSTANCE_TEMPLATE_NAMES "${_all_template_names}" PARENT_SCOPE)
+
+ set(DUNE_INSTANCE_GENERATED "" PARENT_SCOPE)
+endfunction(dune_instance_begin)
+
+
+function(dune_instance_add)
+ cmake_parse_arguments(_arg
+ "" # options
+ "ID;TEMPLATE" # one_value_keywords
+ "FILES" # multi_value_keywords
+ ${ARGV}
+ )
+
+ if(DEFINED _arg_UNPARSED_ARGUMENTS)
+ message(FATAL_ERROR "unrecognized arguments: ${_arg_UNPARSED_ARGUMENTS}")
+ endif(DEFINED _arg_UNPARSED_ARGUMENTS)
+
+ # ensure _arg_TEMPLATE is set, even if it is the empty value
+ set(_arg_TEMPLATE "${_arg_TEMPLATE}")
+
+ set(_template_used FALSE)
+
+ ######################################################################
+ # Instantiate global (list) templates
+ set(_all_content_parts "")
+ list(LENGTH _DUNE_INSTANCE_GLOBAL_FILES _file_count)
+ foreach(_file_index RANGE "${_file_count}")
+ # filter out the end of the range, this also works with empty ranges
+ if(_file_index EQUAL _file_count)
+ break()
+ endif()
+ list(GET _DUNE_INSTANCE_GLOBAL_FILES "${_file_index}" _spec)
+ dune_instance_parse_file_spec("${_spec}" TEMPLATE INSTANCE)
+ dune_instance_set_generated()
+
+ list(GET _DUNE_INSTANCE_CONTENT_PARTS "${_file_index}" _content_parts)
+ dune_instance_unquote_element(_content_parts)
+ list(GET _DUNE_INSTANCE_TEMPLATE_PARTS "${_file_index}" _template_parts)
+ dune_instance_unquote_element(_template_parts)
+ list(GET _DUNE_INSTANCE_TEMPLATE_NAMES "${_file_index}" _template_names)
+ dune_instance_unquote_element(_template_names)
+
+ set(_content_parts_result "")
+ list(LENGTH _template_parts _parts_count)
+ foreach(_part_index RANGE "${_parts_count}")
+ list(GET _content_parts "${_part_index}" _content_part)
+ # The list of template parts should be one shorter than the list of
+ # content parts
+ if(_part_index LESS _parts_count)
+ list(GET _template_names "${_part_index}" _template_name)
+ dune_instance_unquote_element(_template_name)
+ if(_template_name STREQUAL _arg_TEMPLATE)
+ set(_template_used TRUE)
+
+ list(GET _template_parts "${_part_index}" _template_part)
+ dune_instance_unquote_element(_template_part)
+ string(CONFIGURE "${_template_part}" _result)
+
+ dune_instance_unquote_element(_content_part)
+ string(APPEND _content_part "${_result}")
+ dune_instance_quote_element(_content_part)
+ endif()
+ endif()
+ list(APPEND _content_parts_result "${_content_part}")
+ endforeach(_part_index)
+
+ dune_instance_quote_element(_content_parts_result)
+ list(APPEND _all_content_parts "${_content_parts_result}")
+ endforeach(_file_index)
+ set(_DUNE_INSTANCE_CONTENT_PARTS "${_all_content_parts}" PARENT_SCOPE)
+
+ ######################################################################
+ # instantiate per instance templates
+ foreach(_spec IN LISTS _arg_FILES)
+ set(_template_used TRUE)
+ dune_instance_from_id("${_spec}" "${_arg_ID}" _template_file _instance_file)
+ set(_bindir_instance_file "${_instance_file}")
+ dune_instance_apply_bindir(_bindir_instance_file)
+ dune_instance_generate_file("${_template_file}" "${_instance_file}")
+ list(FIND DUNE_INSTANCE_GENERATED "${_bindir_instance_file}" _found_pos)
+ if(_found_pos EQUAL -1)
+ list(APPEND DUNE_INSTANCE_GENERATED "${_bindir_instance_file}")
+ endif()
+ endforeach(_spec)
+
+ # if we did not instantiate anything, that is probably an error
+ if(NOT _template_used)
+ message(FATAL_ERROR "No embedded template matched template '${_arg_TEMPLATE}', and no instance template files were given")
+ endif()
+ set(DUNE_INSTANCE_GENERATED "${DUNE_INSTANCE_GENERATED}" PARENT_SCOPE)
+endfunction(dune_instance_add)
+
+function(dune_instance_end)
+ if(ARGC GREATER 0)
+ message(FATAL_ERROR "dune_instance_end() does not take any arguments")
+ endif()
+
+ ######################################################################
+ # Write global instances
+ list(LENGTH _DUNE_INSTANCE_GLOBAL_FILES _file_count)
+ foreach(_file_index RANGE "${_file_count}")
+ # filter out the end of the range, this also works with empty ranges
+ if(_file_index EQUAL _file_count)
+ break()
+ endif()
+ list(GET _DUNE_INSTANCE_GLOBAL_FILES "${_file_index}" _spec)
+ dune_instance_parse_file_spec("${_spec}" TEMPLATE INSTANCE)
+ set(BINDIR_INSTANCE "${INSTANCE}")
+ dune_instance_apply_bindir(BINDIR_INSTANCE)
+
+ # make sure we did not generate this file before
+ get_property(_seen SOURCE "${BINDIR_INSTANCE}" PROPERTY DUNE_INSTANCE_GENERATED)
+ if("${_seen}")
+ message(FATAL_ERROR "Attempt to generate ${INSTANCE} (from ${TEMPLATE}), "
+ "which has already been generated")
+ endif("${_seen}")
+
+ list(GET _DUNE_INSTANCE_CONTENT_PARTS "${_file_index}" _content_parts)
+ dune_instance_unquote_element(_content_parts)
+
+ set(_content "")
+ foreach(_part IN LISTS _content_parts)
+ dune_instance_unquote_element(_part)
+ string(APPEND _content "${_part}")
+ endforeach(_part)
+ # remove the final newline that we appended when reading the template file
+ string(REGEX REPLACE "\n\$" "" _content "${_content}")
+
+ message_verbose("Writing ${INSTANCE}")
+ # only write if the content changes, avoiding recompilations
+ dune_write_changed_file("${BINDIR_INSTANCE}" "${_content}")
+
+ set_property(SOURCE "${BINDIR_INSTANCE}" PROPERTY DUNE_INSTANCE_GENERATED TRUE)
+ list(APPEND DUNE_INSTANCE_GENERATED "${BINDIR_INSTANCE}")
+ endforeach(_file_index)
+
+ set(DUNE_INSTANCE_GENERATED "${DUNE_INSTANCE_GENERATED}" PARENT_SCOPE)
+endfunction(dune_instance_end)
--- /dev/null
+message(DEPRECATION "The cmake file 'DuneMPI.cmake' is deprecated. Include 'AddMPIFlags.cmake' instead.")
+include(AddMPIFlags)
--- /dev/null
+# Core DUNE module for CMake.
+#
+# Documentation of the public API defined in this module:
+#
+# .. cmake_function:: dune_project
+#
+# Initialize a Dune module. This function needs to be run from every
+# top-level CMakeLists.txt file. It sets up the module, defines basic
+# variables and manages dependencies. Don't forget to call
+# :ref:`finalize_dune_project` afterwards.
+#
+# .. cmake_function:: finalize_dune_project
+#
+# Finalize a Dune module. This function needs to be run at the end of
+# every top-level CMakeLists.txt file. Among other things it creates
+# the cmake package configuration files. Modules can add additional
+# entries to these files by setting the variable @${ProjectName}_INIT.
+#
+# .. cmake_function:: target_link_libraries
+#
+# .. cmake_brief::
+#
+# Overwrite of CMake's :code:`target_link_libraries`. If no interface
+# keyword (like PUBLIC, INTERFACE, PRIVATE etc.) is given, PUBLIC is added.
+# This is to fix problems with CMP0023.
+#
+# .. cmake_param:: basename
+#
+# .. cmake_function:: dune_add_library
+#
+# .. cmake_brief::
+#
+# Add a library to a Dune module!
+#
+# .. cmake_param:: basename
+# :single:
+# :required:
+# :positional:
+#
+# The basename for the library. On Unix this creates :code:`lib<basename>.so`
+# and :code:`lib<basename>.a`.
+#
+# .. cmake_param:: NO_EXPORT
+# :option:
+#
+# If omitted the library is exported for usage in other modules.
+#
+# .. cmake_param:: ADD_LIBS
+# :multi:
+#
+# A list of libraries that should be incorporated into this library.
+#
+# .. cmake_param:: APPEND
+# :option:
+#
+# Whether the library should be appended to the
+# exported libraries. If a DUNE module must
+# make several libraries available, the first one
+# must not use this option but all others have to
+# use it. Otherwise only the last library will be
+# exported, as the others will be overwritten.
+#
+# .. cmake_param:: OBJECT
+# :option:
+#
+# .. note::
+# This feature will very likely vanish in Dune 3.0
+#
+# .. cmake_param:: SOURCES
+# :multi:
+# :required:
+#
+# The source files from which to build the library.
+#
+# .. cmake_param:: COMPILE_FLAGS
+# :single:
+#
+# Any additional compile flags for building the library.
+#
+# .. cmake_function:: dune_target_link_libraries
+#
+# .. cmake_param:: BASENAME
+#
+# .. cmake_param:: LIBRARIES
+#
+# Link libraries to the static and shared version of
+# library BASENAME
+#
+#
+# .. cmake_function:: add_dune_all_flags
+#
+# .. cmake_param:: targets
+# :single:
+# :required:
+# :positional:
+#
+# The targets to add the flags of all external libraries to.
+#
+# This function is superseded by :ref:`dune_target_enable_all_packages`.
+#
+# Documentation of internal macros in this module:
+#
+# dune_module_to_uppercase(upper_name module_name)
+#
+# Converts a module name given by module_name into an uppercase string
+# upper_name where all dashes (-) are replaced by underscores (_)
+# Example: dune-common -> DUNE_COMMON
+#
+# dune_module_information(MODULE_DIR [QUIET])
+#
+# Parse ${MODULE_DIR}/dune.module and provide that information.
+# If the second argument is QUIET no status message is printed.
+#
+# dune_create_dependency_tree()
+#
+# Creates the dependency tree of the module.
+#
+# dune_module_to_macro(_macro_name, _dune_module)
+#
+# Converts a module name given by _dune_module into a string _macro_name
+# where all dashes (-) are removed and letters after a dash are capitalized
+# Example: dune-grid-howto -> DuneGridHowto
+#
+# _macro_name: variable where the name will be stored.
+# _dune_module: the name of the dune module.
+#
+# dune_regenerate_config_cmake()
+#
+# Creates a new config_collected.h.cmake file in ${CMAKE_CURRENT_BINARY_DIR} that
+# consists of entries from ${CMAKE_CURRENT_SOURCE_DIR}/config.h.cmake
+# and includes non-private entries from the config.h.cmake files
+# of all dependent modules.
+# Finally config.h is created from config_collected.h.cmake.
+#
+include_guard(GLOBAL)
+
+enable_language(C) # Enable C to skip CXX bindings for some tests.
+
+# By default use -pthread flag. This option is set at the beginning to enforce it for
+# find_package(Threads) everywhere
+set(THREADS_PREFER_PTHREAD_FLAG TRUE CACHE BOOL "Prefer -pthread compiler and linker flag")
+
+include(FeatureSummary)
+include(DuneEnableAllPackages)
+include(DuneTestMacros)
+include(OverloadCompilerFlags)
+include(DuneSymlinkOrCopy)
+include(DunePathHelper)
+include(DuneExecuteProcess)
+
+macro(target_link_libraries)
+ # do nothing if not at least the two arguments target and scope are passed
+ if(${ARGC} GREATER_EQUAL 2)
+ target_link_libraries_helper(${ARGN})
+ endif()
+endmacro(target_link_libraries)
+
+# helper for overwritten target_link_libraries to handle arguments more easily
+macro(target_link_libraries_helper TARGET SCOPE)
+ if(${SCOPE} MATCHES "^(PRIVATE|INTERFACE|PUBLIC|LINK_PRIVATE|LINK_PUBLIC|LINK_INTERFACE_LIBRARIES)$")
+ _target_link_libraries(${TARGET} ${SCOPE} ${ARGN})
+ else()
+ message(DEPRECATION "Calling target_link_libraries without the <scope> argument is deprecated.")
+ _target_link_libraries(${TARGET} PUBLIC ${SCOPE} ${ARGN})
+ endif()
+endmacro(target_link_libraries_helper)
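+
+# Illustration (hypothetical target name): a legacy call such as
+#   target_link_libraries(mytarget dunecommon)
+# now triggers the deprecation warning above and is forwarded as
+#   target_link_libraries(mytarget PUBLIC dunecommon)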
+
+# Converts a module name given by _module into an uppercase string
+# _upper where all dashes (-) are replaced by underscores (_)
+# Example: dune-common -> DUNE_COMMON
+macro(dune_module_to_uppercase _upper _module)
+ string(TOUPPER "${_module}" ${_upper})
+ string(REPLACE "-" "_" ${_upper} "${${_upper}}")
+endmacro(dune_module_to_uppercase _upper _module)
+
+macro(find_dune_package module)
+ include(CMakeParseArguments)
+ cmake_parse_arguments(DUNE_FIND "REQUIRED" "VERSION" "" ${ARGN})
+ if(DUNE_FIND_REQUIRED)
+ set(required REQUIRED)
+ set_package_properties(${module} PROPERTIES TYPE REQUIRED)
+ set(_warning_level "FATAL_ERROR")
+ else()
+ unset(required)
+ set(_warning_level "WARNING")
+ set_package_properties(${module} PROPERTIES TYPE OPTIONAL)
+ endif()
+ if(DUNE_FIND_VERSION MATCHES "(>=|=|<=).*")
+ string(REGEX REPLACE "(>=|=|<=)(.*)" "\\1" DUNE_FIND_VERSION_OP ${DUNE_FIND_VERSION})
+ string(REGEX REPLACE "(>=|=|<=)(.*)" "\\2" DUNE_FIND_VERSION_NUMBER ${DUNE_FIND_VERSION})
+ string(STRIP ${DUNE_FIND_VERSION_NUMBER} DUNE_FIND_VERSION_NUMBER)
+ extract_major_minor_version("${DUNE_FIND_VERSION_NUMBER}" DUNE_FIND_VERSION)
+ set(DUNE_FIND_VERSION_STRING "${DUNE_FIND_VERSION_MAJOR}.${DUNE_FIND_VERSION_MINOR}.${DUNE_FIND_VERSION_REVISION}")
+ else()
+ set(DUNE_FIND_VERSION_STRING "0.0.0")
+ endif()
+ if(NOT ${module}_FOUND)
+ if(NOT (${module}_DIR OR ${module}_ROOT OR
+ "${CMAKE_PREFIX_PATH}" MATCHES ".*${module}.*"))
+ string(REPLACE ${ProjectName} ${module} ${module}_DIR
+ ${PROJECT_BINARY_DIR})
+ endif()
+ find_package(${module} NO_CMAKE_PACKAGE_REGISTRY)
+ endif()
+ if(NOT ${module}_FOUND AND NOT CMAKE_DISABLE_FIND_PACKAGE_${module})
+ message(STATUS "No full CMake package configuration support available."
+ " Falling back to pkg-config.")
+ # use pkg-config
+ find_package(PkgConfig)
+ if(NOT PKG_CONFIG_FOUND AND required)
+ message(FATAL_ERROR "Could not find module ${module}. We tried to use"
+ "pkg-config but could not find it. ")
+ endif()
+ pkg_check_modules (${module} ${required} ${module}${DUNE_FIND_VERSION})
+ set(${module}_FAKE_CMAKE_PKGCONFIG TRUE)
+ endif()
+ if(${module}_FAKE_CMAKE_PKGCONFIG)
+ # compute the path to the libraries
+ if(${module}_LIBRARIES)
+ unset(_module_lib)
+ foreach(lib ${${module}_LIBRARIES})
+ foreach(libdir ${${module}_LIBRARY_DIRS})
+ if(EXISTS ${libdir}/lib${lib}.a)
+ set(_module_lib ${libdir}/lib${lib}.a)
+ set(_module_lib_static "STATIC")
+ endif()
+ if(EXISTS ${libdir}/lib${lib}.so)
+ set(_module_lib ${libdir}/lib${lib}.so)
+ set(_module_lib_static "")
+ endif()
+ if(_module_lib)
+ #import library
+ add_library(${lib} ${_module_lib_static} IMPORTED)
+ set_property(TARGET ${lib} APPEND PROPERTY IMPORTED_CONFIGURATIONS NOCONFIG)
+ set_target_properties(${lib} PROPERTIES
+ IMPORTED_LINK_INTERFACE_LANGUAGES_NOCONFIG "CXX"
+ IMPORTED_LOCATION_NOCONFIG "${_module_lib}")
+ break()
+ endif()
+ endforeach()
+ endforeach()
+ endif()
+ if(NOT ${module}_MODULE_PATH)
+ if(${module}_INCLUDE_DIRS)
+ list(GET ${module}_INCLUDE_DIRS 0 _dir)
+ if(EXISTS ${_dir}/../share/dune/cmake/modules)
+ set(${module}_MODULE_PATH ${_dir}/../share/dune/cmake/modules)
+ endif()
+ endif()
+ endif()
+ unset(${module}_FAKE_CMAKE_PKGCONFIG)
+ endif()
+ if(${module}_FOUND)
+ # parse other module's dune.module file to generate variables for config.h
+ unset(${module}_dune_module)
+ foreach(_dune_module_file
+ ${${module}_PREFIX}/dune.module
+ ${${module}_PREFIX}/lib/dunecontrol/${module}/dune.module
+ ${${module}_PREFIX}/lib64/dunecontrol/${module}/dune.module)
+ if(EXISTS ${_dune_module_file})
+ get_filename_component(_dune_module_file_path ${_dune_module_file} PATH)
+ dune_module_information(${_dune_module_file_path})# QUIET)
+ set(${module}_dune_module 1)
+ set(DUNE_FIND_MOD_VERSION_STRING "${DUNE_VERSION_MAJOR}.${DUNE_VERSION_MINOR}.${DUNE_VERSION_REVISION}")
+ # check whether dependency matches version requirement
+ unset(module_version_wrong)
+ if(DUNE_FIND_VERSION_OP MATCHES ">=")
+ if(NOT (DUNE_FIND_MOD_VERSION_STRING VERSION_EQUAL DUNE_FIND_VERSION_STRING OR
+ DUNE_FIND_MOD_VERSION_STRING VERSION_GREATER DUNE_FIND_VERSION_STRING))
+ set(module_version_wrong 1)
+ endif()
+ elseif(DUNE_FIND_VERSION_OP MATCHES "<=")
+ if(NOT (DUNE_FIND_MOD_VERSION_STRING VERSION_EQUAL DUNE_FIND_VERSION_STRING OR
+ DUNE_FIND_MOD_VERSION_STRING VERSION_LESS DUNE_FIND_VERSION_STRING))
+ set(module_version_wrong 1)
+ endif()
+ elseif(DUNE_FIND_VERSION_OP MATCHES "=" AND
+ NOT (DUNE_FIND_MOD_VERSION_STRING VERSION_EQUAL DUNE_FIND_VERSION_STRING))
+ set(module_version_wrong 1)
+ endif()
+ endif()
+ endforeach()
+ if(NOT ${module}_dune_module)
+ message(${_warning_level} "Could not find dune.module file for module ${module} "
+ "in ${${module}_PREFIX}, ${${module}_PREFIX}/lib/dunecontrol/${module}/, "
+ "${${module}_PREFIX}/lib64/dunecontrol/${module}/dune.module")
+ set(${module}_FOUND OFF)
+ endif()
+ if(module_version_wrong)
+ message(${_warning_level} "Could not find requested version of module ${module}. "
+ "Requested version was ${DUNE_FIND_VERSION}, found version is ${DUNE_FIND_MOD_VERSION_STRING}")
+ set(${module}_FOUND OFF)
+ endif()
+ else(${module}_FOUND)
+ if(required)
+ message(FATAL_ERROR "Could not find required module ${module}.")
+ endif()
+ endif()
+ set(DUNE_${module}_FOUND ${${module}_FOUND})
+endmacro(find_dune_package module)
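+
+# Usage sketch (hypothetical module and version requirement; normally this
+# macro is invoked by the dependency handling further down):
+#
+#   find_dune_package(dune-geometry REQUIRED VERSION ">= 2.8")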
+
+macro(extract_line HEADER OUTPUT FILE_NAME)
+ set(REGEX "^${HEADER}[ ]*[^\\n]+")
+ file(STRINGS "${FILE_NAME}" OUTPUT1 REGEX "${REGEX}")
+ if(OUTPUT1)
+ set(REGEX "^[ ]*${HEADER}[ ]*(.+)[ ]*$")
+ string(REGEX REPLACE ${REGEX} "\\1" ${OUTPUT} "${OUTPUT1}")
+ else(OUTPUT1)
+ set(OUTPUT OUTPUT-NOTFOUND)
+ endif()
+endmacro(extract_line)
+
+#
+# split list of modules, potentially with version information
+# into list of modules and list of versions
+#
+macro(split_module_version STRING MODULES VERSIONS)
+ set(REGEX "[a-zA-Z0-9-]+[ ]*(\\([ ]*([^ ]+)?[ ]*[^ ]+[ ]*\\))?")
+ string(REGEX MATCHALL "${REGEX}" matches "${STRING}")
+ set(${MODULES} "")
+ set(${VERSIONS} "")
+ foreach(i ${matches})
+ string(REGEX REPLACE "^([a-zA-Z0-9-]+).*$" "\\1" mod ${i})
+ string(REGEX MATCH "\\([ ]*(([^ ]+)?[ ]*[^ ]+)[ ]*\\)" have_version
+ ${i})
+ if(have_version)
+ string(REGEX REPLACE "^\\([ ]*([^ ]*[ ]*[^ ]+)[ ]*\\)$" "\\1"
+ version ${have_version})
+ else()
+ set(version " ") # Mark as no version requested.
+ # Having a space is mandatory, as the value is appended to a list
+ # and an empty string would not be treated as a list entry.
+ endif()
+ list(APPEND ${MODULES} ${mod})
+ list(APPEND ${VERSIONS} ${version})
+ endforeach()
+endmacro(split_module_version)
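+
+# For example (hypothetical input), calling split_module_version on
+# "dune-geometry (>= 2.8) dune-istl" returns the module list
+# "dune-geometry;dune-istl" and the version list ">= 2.8" followed by the
+# single-space placeholder for dune-istl.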
+
+#
+# Convert a string with spaces in a list which is a string with semicolon
+#
+function(convert_deps_to_list var)
+ string(REGEX REPLACE "([a-zA-Z0-9\\)]) ([a-zA-Z0-9])" "\\1;\\2" ${var} ${${var}})
+ set(${var} ${${var}} PARENT_SCOPE)
+endfunction(convert_deps_to_list var)
+
+#
+# extracts major, minor, and revision from version string
+#
+function(extract_major_minor_version version_string varname)
+ string(REGEX REPLACE "([0-9]+).*" "\\1" ${varname}_MAJOR "${version_string}")
+ string(REGEX REPLACE "[0-9]+\\.([0-9]+).*" "\\1" ${varname}_MINOR "${version_string}")
+ string(REGEX REPLACE "[0-9]+\\.[0-9]+\\.([0-9]+).*" "\\1" ${varname}_REVISION "${version_string}")
+ set(${varname}_MAJOR "${${varname}_MAJOR}" PARENT_SCOPE) # make variable accessible in parent scope
+
+ # remove false matches in version string and export to parent scope
+ string(REGEX MATCH "[^0-9]" NON_NUMBER_CHARACTER "${${varname}_MINOR}")
+ if(NON_NUMBER_CHARACTER)
+ set(${varname}_MINOR "0" PARENT_SCOPE)
+ else()
+ set(${varname}_MINOR ${${varname}_MINOR} PARENT_SCOPE)
+ endif()
+ string(REGEX MATCH "[^0-9]" NON_NUMBER_CHARACTER "${${varname}_REVISION}")
+ if(NON_NUMBER_CHARACTER)
+ set(${varname}_REVISION "0" PARENT_SCOPE)
+ else()
+ set(${varname}_REVISION ${${varname}_REVISION} PARENT_SCOPE)
+ endif()
+endfunction(extract_major_minor_version version_string varname)
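+
+# For example, the version string "2.8.1" yields MAJOR=2, MINOR=8, REVISION=1;
+# for "2.8" the missing revision falls back to 0 via the false-match check
+# above.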
+
+# add dune-common version from dune.module to config.h
+# optional second argument is verbosity
+macro(dune_module_information MODULE_DIR)
+ # find version strings
+ extract_line("Version:" MODULE_LINE "${MODULE_DIR}/dune.module")
+ if(NOT MODULE_LINE MATCHES ".+")
+ message(FATAL_ERROR "${MODULE_DIR}/dune.module is missing a version.")
+ endif()
+
+ string(REGEX REPLACE ".*Version:[ ]*([^ \n]+).*" "\\1" DUNE_MOD_VERSION "${MODULE_LINE}")
+ extract_major_minor_version("${DUNE_MOD_VERSION}" DUNE_VERSION)
+
+ # find strings for module name, maintainer
+ # 1. Check for line starting with Module
+ extract_line("Module:" DUNE_MOD_NAME "${MODULE_DIR}/dune.module")
+ if(NOT DUNE_MOD_NAME)
+ message(FATAL_ERROR "${MODULE_DIR}/dune.module is missing a module name.")
+ endif()
+
+ # 2. Check for line starting with Maintainer
+ extract_line("Maintainer:" DUNE_MAINTAINER "${MODULE_DIR}/dune.module")
+ if(NOT DUNE_MAINTAINER)
+ message(FATAL_ERROR "${MODULE_DIR}/dune.module is missing a maintainer.")
+ endif()
+
+ # 3. Check for line starting with Depends
+ extract_line("Depends:" ${DUNE_MOD_NAME}_DEPENDS "${MODULE_DIR}/dune.module")
+ if(${DUNE_MOD_NAME}_DEPENDS)
+ split_module_version(${${DUNE_MOD_NAME}_DEPENDS} ${DUNE_MOD_NAME}_DEPENDS_MODULE ${DUNE_MOD_NAME}_DEPENDS_VERSION)
+ foreach(_mod ${${DUNE_MOD_NAME}_DEPENDS})
+ set(${_mod}_REQUIRED REQUIRED)
+ endforeach()
+ convert_deps_to_list(${DUNE_MOD_NAME}_DEPENDS)
+ if(NOT ("${ARGV1}" STREQUAL QUIET))
+ message(STATUS "Dependencies for ${DUNE_MOD_NAME}: ${${DUNE_MOD_NAME}_DEPENDS}")
+ endif()
+ endif()
+
+ # 4. Check for line starting with Suggests
+ extract_line("Suggests:" ${DUNE_MOD_NAME}_SUGGESTS "${MODULE_DIR}/dune.module")
+ if(${DUNE_MOD_NAME}_SUGGESTS)
+ split_module_version(${${DUNE_MOD_NAME}_SUGGESTS} ${DUNE_MOD_NAME}_SUGGESTS_MODULE ${DUNE_MOD_NAME}_SUGGESTS_VERSION)
+ convert_deps_to_list(${DUNE_MOD_NAME}_SUGGESTS)
+ if(NOT ("${ARGV1}" STREQUAL QUIET))
+ message(STATUS "Suggestions for ${DUNE_MOD_NAME}: ${${DUNE_MOD_NAME}_SUGGESTS}")
+ endif()
+ endif()
+
+ dune_module_to_uppercase(DUNE_MOD_NAME_UPPERCASE ${DUNE_MOD_NAME})
+
+ # 5. Check for optional meta data
+ extract_line("Author:" ${DUNE_MOD_NAME_UPPERCASE}_AUTHOR "${MODULE_DIR}/dune.module")
+ extract_line("Description:" ${DUNE_MOD_NAME_UPPERCASE}_DESCRIPTION "${MODULE_DIR}/dune.module")
+ extract_line("URL:" ${DUNE_MOD_NAME_UPPERCASE}_URL "${MODULE_DIR}/dune.module")
+ extract_line("Python-Requires:" ${DUNE_MOD_NAME_UPPERCASE}_PYTHON_REQUIRES "${MODULE_DIR}/dune.module")
+
+ # set module version
+ set(${DUNE_MOD_NAME_UPPERCASE}_VERSION "${DUNE_MOD_VERSION}")
+ set(${DUNE_MOD_NAME_UPPERCASE}_VERSION_MAJOR "${DUNE_VERSION_MAJOR}")
+ set(${DUNE_MOD_NAME_UPPERCASE}_VERSION_MINOR "${DUNE_VERSION_MINOR}")
+ set(${DUNE_MOD_NAME_UPPERCASE}_VERSION_REVISION "${DUNE_VERSION_REVISION}")
+endmacro(dune_module_information)
+
+macro(dune_process_dependency_leafs modules versions is_required next_level_deps
+ next_level_sugs)
+ # modules and versions are macro arguments, not real variables; copy them into local ones
+ set(mmodules ${modules})
+ set(mversions ${versions})
+ list(LENGTH mmodules mlength)
+ list(LENGTH mversions vlength)
+ if(NOT mlength EQUAL vlength)
+ message(STATUS "mmodules=${mmodules} modules=${modules}")
+ message(STATUS "mversions=${mversions} versions=${mversions}")
+ message(FATAL_ERROR "List of modules and versions do not have the same length!")
+ endif()
+ if(mlength GREATER 0)
+ math(EXPR length "${mlength}-1")
+ foreach(i RANGE 0 ${length})
+ list(GET mmodules ${i} _mod)
+ list(GET mversions ${i} _ver)
+ find_dune_package(${_mod} ${is_required} VERSION "${_ver}")
+ set(${_mod}_SEARCHED ON)
+ if(NOT "${is_required}" STREQUAL "")
+ set(${_mod}_REQUIRED ON)
+ set(${next_level_deps} ${${_mod}_DEPENDS} ${${next_level_deps}})
+ else(NOT "${is_required}" STREQUAL "")
+ set(${next_level_sugs} ${${_mod}_DEPENDS} ${${next_level_sugs}})
+ endif()
+ set(${next_level_sugs} ${${_mod}_SUGGESTS} ${${next_level_sugs}})
+ endforeach()
+ endif()
+ if(${next_level_sugs})
+ list(REMOVE_DUPLICATES ${next_level_sugs})
+ endif()
+ if(${next_level_deps})
+ list(REMOVE_DUPLICATES ${next_level_deps})
+ endif()
+endmacro(dune_process_dependency_leafs)
+
+function(remove_processed_modules modules versions is_required)
+ list(LENGTH ${modules} mlength)
+ if(mlength GREATER 0)
+ math(EXPR length "${mlength}-1")
+ foreach(i RANGE ${length} 0 -1)
+ list(GET ${modules} ${i} _mod)
+ if(${_mod}_SEARCHED)
+ list(REMOVE_AT ${modules} ${i})
+ list(REMOVE_AT ${versions} ${i})
+ if(is_required AND NOT ${_mod}_REQUIRED AND NOT ${_mod}_FOUND)
+ message(FATAL_ERROR "Required module ${_mod} not found!")
+ endif()
+ endif()
+ endforeach()
+ endif()
+ set(${modules} ${${modules}} PARENT_SCOPE)
+ set(${versions} ${${versions}} PARENT_SCOPE)
+endfunction(remove_processed_modules modules versions is_required)
+
+macro(dune_create_dependency_leafs depends depends_versions suggests suggests_versions)
+ set(deps "")
+ set(sugs "")
+ #Process dependencies
+ if(NOT "${depends}" STREQUAL "")
+ dune_process_dependency_leafs("${depends}" "${depends_versions}" REQUIRED deps sugs)
+ endif()
+ # Process suggestions
+ if(NOT "${suggests}" STREQUAL "")
+ dune_process_dependency_leafs("${suggests}" "${suggests_versions}" "" deps sugs)
+ endif()
+ split_module_version("${deps}" next_mod_depends next_depends_versions)
+ split_module_version("${sugs}" next_mod_suggests next_suggests_versions)
+ set(ALL_DEPENDENCIES ${ALL_DEPENDENCIES} ${next_mod_depends} ${next_mod_suggests})
+ # Move to next level
+ if(next_mod_suggests OR next_mod_depends)
+ dune_create_dependency_leafs("${next_mod_depends}" "${next_depends_versions}"
+ "${next_mod_suggests}" "${next_suggests_versions}")
+ endif()
+endmacro(dune_create_dependency_leafs)
+
+macro(dune_create_dependency_tree)
+ if(dune-common_MODULE_PATH)
+ list(REMOVE_ITEM CMAKE_MODULE_PATH "${dune-common_MODULE_PATH}")
+ endif()
+ list(FIND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/modules start)
+ set(ALL_DEPENDENCIES "")
+ if(${ProjectName}_DEPENDS_MODULE OR ${ProjectName}_SUGGESTS_MODULE)
+ set(ALL_DEPENDENCIES ${${ProjectName}_DEPENDS_MODULE} ${${ProjectName}_SUGGESTS_MODULE})
+ dune_create_dependency_leafs("${${ProjectName}_DEPENDS_MODULE}" "${${ProjectName}_DEPENDS_VERSION}"
+ "${${ProjectName}_SUGGESTS_MODULE}" "${${ProjectName}_SUGGESTS_VERSION}")
+ endif()
+ set(_my_path "")
+ if(ALL_DEPENDENCIES)
+ # Reverse the order of the modules and remove duplicates.
+ # At the end of this clause we have a list of modules
+ # where for each entry all of its dependencies come before the
+ # entry itself in the list.
+ set(NEW_ALL_DEPS "")
+ list(LENGTH ALL_DEPENDENCIES length)
+ if(length GREATER 0)
+ math(EXPR length "${length}-1")
+ list(GET ALL_DEPENDENCIES ${length} _mod)
+ set(${_mod}_cmake_path_processed 1)
+ set(_my_path ${${_mod}_MODULE_PATH})
+ list(APPEND NEW_ALL_DEPS ${_mod})
+ if(length GREATER 0)
+ math(EXPR length "${length}-1")
+ foreach(i RANGE ${length} 0 -1)
+ list(GET ALL_DEPENDENCIES ${i} _mod)
+ if(NOT ${_mod}_cmake_path_processed)
+ set(${_mod}_cmake_path_processed 1)
+ if(${_mod}_MODULE_PATH)
+ list(INSERT _my_path 0 ${${_mod}_MODULE_PATH})
+ endif()
+ list(APPEND NEW_ALL_DEPS ${_mod})
+ endif()
+ endforeach()
+ endif()
+ list(LENGTH CMAKE_MODULE_PATH length)
+ math(EXPR length "${length}-1")
+ if(start EQUAL -1)
+ list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/modules ${_my_path})
+ else()
+ if(start EQUAL ${length})
+ list(APPEND CMAKE_MODULE_PATH ${_my_path})
+ else()
+ if(_my_path)
+ list(INSERT CMAKE_MODULE_PATH ${start} ${_my_path})
+ endif()
+ endif()
+ endif()
+ endif()
+ set(ALL_DEPENDENCIES ${NEW_ALL_DEPS})
+ endif()
+endmacro(dune_create_dependency_tree)
+
+# Converts a module name given by _dune_module into a string _macro_name
+# where all dashes (-) are removed and the first letter as well as each letter following a dash is capitalized
+# Example: dune-grid-howto -> DuneGridHowto
+macro(dune_module_to_macro _macro_name _dune_module)
+ set(${_macro_name} "")
+ set(_rest "${_dune_module}")
+ string(FIND "${_rest}" "-" _found)
+ while(_found GREATER -1)
+ string(REGEX REPLACE "([^-]*)-.*" "\\1" _first_part
+ "${_rest}")
+ string(REGEX REPLACE "[^-]*-(.*)" "\\1" _rest
+ "${_rest}")
+ string(SUBSTRING "${_first_part}" 0 1 _first_letter)
+ string(SUBSTRING "${_first_part}" 1 -1 _rest_first_part)
+ string(TOUPPER "${_first_letter}" _first_letter)
+ set(${_macro_name} "${${_macro_name}}${_first_letter}${_rest_first_part}")
+ string(FIND "${_rest}" "-" _found)
+ endwhile()
+ string(LENGTH "${_rest}" _length)
+ string(SUBSTRING "${_rest}" 0 1 _first_letter)
+ string(SUBSTRING "${_rest}" 1 -1 _rest)
+ string(TOUPPER "${_first_letter}" _first_letter)
+ set(${_macro_name} "${${_macro_name}}${_first_letter}${_rest}")
+endmacro(dune_module_to_macro _macro_name _dune_module)
+
+macro(dune_process_dependency_macros)
+ foreach(_mod ${ALL_DEPENDENCIES} ${ProjectName})
+ if(NOT ${_mod}_PROCESSED)
+ # module not processed yet
+ set(${_mod}_PROCESSED ${_mod})
+ # Search for a cmake file containing tests and directives
+ # specific to this module
+ dune_module_to_macro(_cmake_mod_name "${_mod}")
+ set(_macro "${_cmake_mod_name}Macros")
+ set(_mod_cmake _mod_cmake-NOTFOUND) # Prevent false positives due to caching
+ message(STATUS "Searching for macro file '${_macro}' for module '${_mod}'.")
+ find_file(_mod_cmake
+ NAMES "${_macro}.cmake"
+ PATHS ${CMAKE_MODULE_PATH}
+ NO_DEFAULT_PATH
+ NO_CMAKE_FIND_ROOT_PATH)
+ if(_mod_cmake)
+ message(STATUS "Performing tests specific to ${_mod} from file ${_mod_cmake}.")
+ include(${_mod_cmake})
+ else()
+ message(STATUS "No module specific tests performed for module '${_mod}' because macro file '${_macro}.cmake' not found in ${CMAKE_MODULE_PATH}.")
+ endif()
+ dune_module_to_uppercase(_upper_case "${_mod}")
+ if(${_mod}_INCLUDE_DIRS)
+ message(STATUS "Setting ${_mod}_INCLUDE_DIRS=${${_mod}_INCLUDE_DIRS}")
+ include_directories("${${_mod}_INCLUDE_DIRS}")
+ endif()
+ if(${_mod}_LIBRARIES)
+ message(STATUS "Setting ${_mod}_LIBRARIES=${${_mod}_LIBRARIES}")
+ foreach(_lib ${${_mod}_LIBRARIES})
+ list(INSERT DUNE_DEFAULT_LIBS 0 "${_lib}")
+ list(INSERT DUNE_LIBS 0 "${_lib}")
+ endforeach()
+ endif()
+
+ # register dune module
+ dune_register_package_flags(INCLUDE_DIRS "${${_mod}_INCLUDE_DIRS}")
+ endif()
+ endforeach()
+endmacro(dune_process_dependency_macros)
+
+# macro that should be called near the beginning of the top level CMakeLists.txt.
+# It sets up the module, defines basic variables and manages
+# dependencies.
+# Don't forget to call finalize_dune_project afterwards.
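+#
+# A minimal top-level CMakeLists.txt sketch for a module using this macro
+# (the module name and subdirectory are illustrative; the project() name must
+# match the Module entry of dune.module, which is asserted below):
+#
+#   cmake_minimum_required(VERSION 3.13)
+#   project(dune-foo CXX)
+#   find_package(dune-common REQUIRED)
+#   list(APPEND CMAKE_MODULE_PATH ${dune-common_MODULE_PATH})
+#   include(DuneMacros)
+#   dune_project()
+#   add_subdirectory(src)
+#   finalize_dune_project(GENERATE_CONFIG_H_CMAKE)
+#
+# Passing any single argument to finalize_dune_project enables config.h
+# generation; DUNE modules conventionally pass GENERATE_CONFIG_H_CMAKE.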
+macro(dune_project)
+
+ # check if CXX flag overloading has been enabled (see OverloadCompilerFlags.cmake)
+ initialize_compiler_script()
+
+ # extract information from dune.module
+ dune_module_information(${PROJECT_SOURCE_DIR})
+ set(ProjectName "${DUNE_MOD_NAME}")
+ set(ProjectVersion "${DUNE_MOD_VERSION}")
+ set(ProjectVersionString "${DUNE_VERSION_MAJOR}.${DUNE_VERSION_MINOR}.${DUNE_VERSION_REVISION}")
+ set(ProjectVersionMajor "${DUNE_VERSION_MAJOR}")
+ set(ProjectVersionMinor "${DUNE_VERSION_MINOR}")
+ set(ProjectVersionRevision "${DUNE_VERSION_REVISION}")
+ set(ProjectMaintainerEmail "${DUNE_MAINTAINER}")
+ set(ProjectDescription "${${DUNE_MOD_NAME_UPPERCASE}_DESCRIPTION}")
+ set(ProjectAuthor "${${DUNE_MOD_NAME_UPPERCASE}_AUTHOR}")
+ set(ProjectUrl "${${DUNE_MOD_NAME_UPPERCASE}_URL}")
+ set(ProjectPythonRequires "${${DUNE_MOD_NAME_UPPERCASE}_PYTHON_REQUIRES}")
+
+ # check whether this module has been explicitly disabled through the cmake flags.
+ # If so, stop the build. This is necessary because dunecontrol does not parse
+ # the given CMake flags for disabled Dune modules.
+ if(CMAKE_DISABLE_FIND_PACKAGE_${ProjectName})
+ message("Module ${ProjectName} has been explicitly disabled through the cmake flags. Skipping build.")
+ return()
+ endif()
+
+ define_property(GLOBAL PROPERTY DUNE_MODULE_LIBRARIES
+ BRIEF_DOCS "List of libraries of the module. DO NOT EDIT!"
+ FULL_DOCS "List of libraries of the module. Used to generate CMake's package configuration files. DO NOT EDIT!")
+ dune_create_dependency_tree()
+
+ # assert that the project name matches
+ if(NOT (ProjectName STREQUAL PROJECT_NAME))
+ message(FATAL_ERROR "Module name from dune.module does not match the name given in CMakeLists.txt.")
+ endif()
+
+ # By default, request position independent code if shared libraries are built.
+ # This should allow DUNE modules to use CMake's object libraries.
+ # This can be overwritten for targets by setting the target property
+ # POSITION_INDEPENDENT_CODE to false/OFF
+ include(CMakeDependentOption)
+ cmake_dependent_option(CMAKE_POSITION_INDEPENDENT_CODE "Build position independent code" ON "NOT BUILD_SHARED_LIBS" ON)
+
+ # check for C++ features, set compiler flags for C++14 or C++11 mode
+ include(CheckCXXFeatures)
+
+ # set include path and link path for the current project.
+ include_directories("${PROJECT_BINARY_DIR}")
+ include_directories("${PROJECT_SOURCE_DIR}")
+ include_directories("${CMAKE_CURRENT_BINARY_DIR}")
+ include_directories("${CMAKE_CURRENT_SOURCE_DIR}")
+ add_definitions(-DHAVE_CONFIG_H)
+
+ # Create custom target for building the documentation
+ # and provide macros for installing the docs and force
+ # building them before.
+ include(DuneDoc)
+
+ # activate pkg-config
+ include(DunePkgConfig)
+
+ # Process the macros provided by the dependencies and ourself
+ dune_process_dependency_macros()
+
+ include(GNUInstallDirs)
+ # Set variable where the cmake modules will be installed.
+ # Thus the user can override it and for example install
+ # directly into the CMake installation. We use a cache variable
+ # that is overridden by a local variable of the same name if
+ # the user does not explicitly set a value for it. Thus the value
+ # will automatically change if the user changes CMAKE_INSTALL_DATAROOTDIR
+ # or CMAKE_INSTALL_PREFIX
+ if(NOT DUNE_INSTALL_MODULEDIR)
+ set(DUNE_INSTALL_MODULEDIR ""
+ CACHE PATH
+ "Installation directory for CMake modules. Default is \${CMAKE_INSTALL_DATAROOTDIR}/dune/cmake/modules when not set explicitely")
+ set(DUNE_INSTALL_MODULEDIR ${CMAKE_INSTALL_DATAROOTDIR}/dune/cmake/modules)
+ endif()
+ if(NOT DUNE_INSTALL_NONOBJECTLIBDIR)
+ set(DUNE_INSTALL_NONOBJECTLIBDIR ""
+ CACHE PATH
+ "Installation directory for libraries that are not architecture dependent. Default is lib when not set explicitely")
+ set(DUNE_INSTALL_NONOBJECTLIBDIR lib)
+ endif()
+ # set up make headercheck
+ include(Headercheck)
+ setup_headercheck()
+
+endmacro(dune_project)
+
+# create a new config_collected.h.cmake file in the build tree and overwrite the existing one
+macro(dune_regenerate_config_cmake)
+ set(CONFIG_H_CMAKE_FILE "${PROJECT_BINARY_DIR}/config_collected.h.cmake")
+ if(EXISTS ${PROJECT_SOURCE_DIR}/config.h.cmake)
+ file(READ ${PROJECT_SOURCE_DIR}/config.h.cmake _file)
+ string(REGEX MATCH
+ "/[\\*/][ ]*begin[ ]+${ProjectName}.*\\/[/\\*][ ]*end[ ]*${ProjectName}[^\\*]*\\*/"
+ _myfile "${_file}")
+ endif()
+ # overwrite file with new content
+ file(WRITE ${CONFIG_H_CMAKE_FILE} "/* config.h. Generated from config_collected.h.cmake by CMake.
+ It was generated from config_collected.h.cmake which in turn is generated automatically
+ from the config.h.cmake files of modules this module depends on. */"
+ )
+
+ # define that we found this module
+ set(${ProjectName}_FOUND 1)
+ foreach(_dep ${ProjectName} ${ALL_DEPENDENCIES})
+ dune_module_to_uppercase(upper ${_dep})
+ set(HAVE_${upper} ${${_dep}_FOUND})
+ file(APPEND ${CONFIG_H_CMAKE_FILE}
+ "\n\n/* Define to 1 if you have module ${_dep} available */
+#cmakedefine01 HAVE_${upper}\n")
+ endforeach()
+
+ # add previous module specific section
+ foreach(_dep ${ALL_DEPENDENCIES})
+ foreach(_mod_conf_file ${${_dep}_PREFIX}/config.h.cmake
+ ${${_dep}_PREFIX}/share/${_dep}/config.h.cmake)
+ if(EXISTS ${_mod_conf_file})
+ file(READ "${_mod_conf_file}" _file)
+ string(REGEX REPLACE
+ ".*/\\*[ ]*begin[ ]+${_dep}[^\\*]*\\*/(.*)/[/\\*][ ]*end[ ]*${_dep}[^\\*]*\\*/" "\\1"
+ _tfile "${_file}")
+ # strip the private section
+ string(REGEX REPLACE "(.*)/[\\*][ ]*begin private.*/[\\*][ ]*end[ ]*private[^\\*]\\*/(.*)" "\\1\\2" _ttfile "${_tfile}")
+
+ # extract the bottom section
+ string(REGEX MATCH "/[\\*][ ]*begin bottom.*/[\\*][ ]*end[ ]*bottom[^\\*]\\*/" _tbottom "${_ttfile}")
+ string(REGEX REPLACE ".*/\\*[ ]*begin[ ]+bottom[^\\*]*\\*/(.*)/[/\\*][ ]*end[ ]*bottom[^\\*]*\\*/" "\\1" ${_dep}_CONFIG_H_BOTTOM "${_tbottom}" )
+ string(REGEX REPLACE "(.*)/[\\*][ ]*begin bottom.*/[\\*][ ]*end[ ]*bottom[^\\*]\\*/(.*)" "\\1\\2" _file "${_ttfile}")
+
+ # append bottom section
+ if(${_dep}_CONFIG_H_BOTTOM)
+ set(CONFIG_H_BOTTOM "${CONFIG_H_BOTTOM} ${${_dep}_CONFIG_H_BOTTOM}")
+ endif()
+
+ file(APPEND ${CONFIG_H_CMAKE_FILE} "${_file}")
+ endif()
+ endforeach()
+ endforeach()
+ # parse the dune.module file of the current module again to set the PACKAGE_* variables
+ dune_module_information(${PROJECT_SOURCE_DIR} QUIET)
+ file(APPEND ${CONFIG_H_CMAKE_FILE} "\n${_myfile}")
+ # append CONFIG_H_BOTTOM section at the end if found
+ if(CONFIG_H_BOTTOM)
+ file(APPEND ${CONFIG_H_CMAKE_FILE} "${CONFIG_H_BOTTOM}")
+ endif()
+endmacro(dune_regenerate_config_cmake)
+
+# macro that should be called at the end of the top level CMakeLists.txt.
+# It creates config.h and the cmake-config files, adds
+# some install directives and exports the module.
+macro(finalize_dune_project)
+ if(DUNE_SYMLINK_TO_SOURCE_TREE)
+ dune_symlink_to_source_tree()
+ endif()
+
+ #configure all headerchecks
+ finalize_headercheck()
+
+ #create cmake-config files for installation tree
+ include(CMakePackageConfigHelpers)
+ include(GNUInstallDirs)
+ set(DOXYSTYLE_DIR ${CMAKE_INSTALL_DATAROOTDIR}/dune-common/doc/doxygen/)
+ set(SCRIPT_DIR ${CMAKE_INSTALL_DATAROOTDIR}/dune/cmake/scripts)
+ # Set the location where the doc sources are installed.
+ # Needed by custom package configuration
+ # file section of dune-grid.
+ set(DUNE_MODULE_SRC_DOCDIR "\${${ProjectName}_PREFIX}/${CMAKE_INSTALL_DOCDIR}")
+
+ if(NOT EXISTS ${PROJECT_SOURCE_DIR}/cmake/pkg/${ProjectName}-config.cmake.in)
+ # Generate a standard cmake package configuration file
+ file(WRITE ${PROJECT_BINARY_DIR}/CMakeFiles/${ProjectName}-config.cmake.in
+"if(NOT ${ProjectName}_FOUND)
+# Whether this module is installed or not
+set(${ProjectName}_INSTALLED @MODULE_INSTALLED@)
+
+# Settings specific to the module
+@${ProjectName}_INIT@
+# Package initialization
+@PACKAGE_INIT@
+
+#report other information
+set_and_check(${ProjectName}_PREFIX \"\${PACKAGE_PREFIX_DIR}\")
+set_and_check(${ProjectName}_INCLUDE_DIRS \"@PACKAGE_CMAKE_INSTALL_INCLUDEDIR@\")
+set(${ProjectName}_CXX_FLAGS \"${CMAKE_CXX_FLAGS}\")
+set(${ProjectName}_CXX_FLAGS_DEBUG \"${CMAKE_CXX_FLAGS_DEBUG}\")
+set(${ProjectName}_CXX_FLAGS_MINSIZEREL \"${CMAKE_CXX_FLAGS_MINSIZEREL}\")
+set(${ProjectName}_CXX_FLAGS_RELEASE \"${CMAKE_CXX_FLAGS_RELEASE}\")
+set(${ProjectName}_CXX_FLAGS_RELWITHDEBINFO \"${CMAKE_CXX_FLAGS_RELWITHDEBINFO}\")
+set(${ProjectName}_DEPENDS \"@${ProjectName}_DEPENDS@\")
+set(${ProjectName}_SUGGESTS \"@${ProjectName}_SUGGESTS@\")
+set(${ProjectName}_MODULE_PATH \"@PACKAGE_DUNE_INSTALL_MODULEDIR@\")
+set(${ProjectName}_LIBRARIES \"@DUNE_MODULE_LIBRARIES@\")
+
+# Lines that are set by the CMake build system via the variable DUNE_CUSTOM_PKG_CONFIG_SECTION
+${DUNE_CUSTOM_PKG_CONFIG_SECTION}
+
+#import the target
+if(${ProjectName}_LIBRARIES)
+ get_filename_component(_dir \"\${CMAKE_CURRENT_LIST_FILE}\" PATH)
+ include(\"\${_dir}/${ProjectName}-targets.cmake\")
+endif()
+endif()")
+ set(CONFIG_SOURCE_FILE ${PROJECT_BINARY_DIR}/CMakeFiles/${ProjectName}-config.cmake.in)
+ else()
+ set(CONFIG_SOURCE_FILE ${PROJECT_SOURCE_DIR}/cmake/pkg/${ProjectName}-config.cmake.in)
+ endif()
+ get_property(DUNE_MODULE_LIBRARIES GLOBAL PROPERTY DUNE_MODULE_LIBRARIES)
+
+ # compute under which libdir the package configuration files are to be installed.
+ # If the module installs an object library we use CMAKE_INSTALL_LIBDIR
+ # to capture the multiarch triplet of Debian/Ubuntu.
+ # Otherwise we fall back to DUNE_INSTALL_NONOBJECTLIBDIR which is lib
+ # if not set otherwise.
+ get_property(DUNE_MODULE_LIBRARIES GLOBAL PROPERTY DUNE_MODULE_LIBRARIES)
+ if(DUNE_MODULE_LIBRARIES)
+ set(DUNE_INSTALL_LIBDIR ${CMAKE_INSTALL_LIBDIR})
+ else()
+ set(DUNE_INSTALL_LIBDIR ${DUNE_INSTALL_NONOBJECTLIBDIR})
+ endif()
+
+ # Set the location of the doc file source. Needed by custom package configuration
+ # file section of dune-grid.
+ set(DUNE_MODULE_SRC_DOCDIR "${PROJECT_SOURCE_DIR}/doc")
+ set(MODULE_INSTALLED ON)
+
+ configure_package_config_file(${CONFIG_SOURCE_FILE}
+ ${PROJECT_BINARY_DIR}/cmake/pkg/${ProjectName}-config.cmake
+ INSTALL_DESTINATION ${DUNE_INSTALL_LIBDIR}/cmake/${ProjectName}
+ PATH_VARS CMAKE_INSTALL_DATAROOTDIR DUNE_INSTALL_MODULEDIR CMAKE_INSTALL_INCLUDEDIR
+ DOXYSTYLE_DIR SCRIPT_DIR)
+
+
+ #create cmake-config files for build tree
+ set(PACKAGE_CMAKE_INSTALL_INCLUDEDIR ${PROJECT_SOURCE_DIR})
+ set(PACKAGE_CMAKE_INSTALL_DATAROOTDIR ${PROJECT_BINARY_DIR})
+ set(PACKAGE_DOXYSTYLE_DIR ${PROJECT_SOURCE_DIR}/doc/doxygen)
+ set(PACKAGE_SCRIPT_DIR ${PROJECT_SOURCE_DIR}/cmake/scripts)
+ set(PACKAGE_DUNE_INSTALL_MODULEDIR ${PROJECT_SOURCE_DIR}/cmake/modules)
+ set(PACKAGE_PREFIX_DIR ${PROJECT_BINARY_DIR})
+ set(PACKAGE_INIT "# Set prefix to source dir
+set(PACKAGE_PREFIX_DIR ${PROJECT_SOURCE_DIR})
+macro(set_and_check _var _file)
+ set(\${_var} \"\${_file}\")
+ if(NOT EXISTS \"\${_file}\")
+ message(FATAL_ERROR \"File or directory \${_file} referenced by variable \${_var} does not exist !\")
+ endif()
+endmacro()")
+ set(MODULE_INSTALLED OFF)
+ configure_file(
+ ${CONFIG_SOURCE_FILE}
+ ${PROJECT_BINARY_DIR}/${ProjectName}-config.cmake @ONLY)
+
+ if(NOT EXISTS ${PROJECT_SOURCE_DIR}/${ProjectName}-config-version.cmake.in)
+ file(WRITE ${PROJECT_BINARY_DIR}/CMakeFiles/${ProjectName}-config-version.cmake.in
+"set(PACKAGE_VERSION \"${ProjectVersionString}\")
+
+if(\"\${PACKAGE_FIND_VERSION_MAJOR}\" EQUAL \"${ProjectVersionMajor}\" AND
+ \"\${PACKAGE_FIND_VERSION_MINOR}\" EQUAL \"${ProjectVersionMinor}\")
+ set (PACKAGE_VERSION_COMPATIBLE 1) # compatible with newer
+ if (\"\${PACKAGE_FIND_VERSION}\" VERSION_EQUAL \"${ProjectVersionString}\")
+ set(PACKAGE_VERSION_EXACT 1) #exact match for this version
+ endif()
+endif()
+")
+ set(CONFIG_VERSION_FILE ${PROJECT_BINARY_DIR}/CMakeFiles/${ProjectName}-config-version.cmake.in)
+ else()
+ set(CONFIG_VERSION_FILE ${PROJECT_SOURCE_DIR}/${ProjectName}-config-version.cmake.in)
+ endif()
+ configure_file(
+ ${CONFIG_VERSION_FILE}
+ ${PROJECT_BINARY_DIR}/${ProjectName}-config-version.cmake @ONLY)
+
+ # install dune.module file
+ install(FILES dune.module DESTINATION ${DUNE_INSTALL_NONOBJECTLIBDIR}/dunecontrol/${ProjectName})
+
+ # install cmake-config files
+ install(FILES ${PROJECT_BINARY_DIR}/cmake/pkg/${ProjectName}-config.cmake
+ ${PROJECT_BINARY_DIR}/${ProjectName}-config-version.cmake
+ DESTINATION ${DUNE_INSTALL_LIBDIR}/cmake/${ProjectName})
+
+ # install config.h
+ if(EXISTS ${PROJECT_SOURCE_DIR}/config.h.cmake)
+ install(FILES config.h.cmake DESTINATION share/${ProjectName})
+ endif()
+
+ # install pkg-config files
+ create_and_install_pkconfig(${DUNE_INSTALL_LIBDIR})
+
+ if("${ARGC}" EQUAL "1")
+ message(STATUS "Adding custom target for config.h generation")
+ dune_regenerate_config_cmake()
+ # add a target to generate config.h.cmake
+ if(NOT TARGET OUTPUT)
+ add_custom_target(OUTPUT config_collected.h.cmake
+ COMMAND dune_regenerate_config_cmake())
+ endif()
+ # actually write the config.h file to disk
+ # using generated file
+ configure_file(${CMAKE_CURRENT_BINARY_DIR}/config_collected.h.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/config.h)
+ else()
+ message(STATUS "Not adding custom target for config.h generation")
+ # actually write the config.h file to disk
+ configure_file(config.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/config.h)
+ endif()
+
+ if(PROJECT_NAME STREQUAL CMAKE_PROJECT_NAME)
+ feature_summary(WHAT ALL)
+ endif()
+
+ # check if CXX flag overloading has been enabled
+ # and write compiler script (see OverloadCompilerFlags.cmake)
+ finalize_compiler_script()
+endmacro(finalize_dune_project)
+
+macro(target_link_dune_default_libraries _target)
+ foreach(_lib ${DUNE_DEFAULT_LIBS})
+ target_link_libraries(${_target} PUBLIC ${_lib})
+ endforeach()
+endmacro(target_link_dune_default_libraries)
+
+function(dune_expand_object_libraries _SOURCES_var _ADD_LIBS_var _COMPILE_FLAGS_var)
+ set(_new_SOURCES "")
+ set(_new_ADD_LIBS "${${_ADD_LIBS_var}}")
+ set(_new_COMPILE_FLAGS "${${_COMPILE_FLAGS_var}}")
+ set(_regex "_DUNE_TARGET_OBJECTS:([a-zA-Z0-9_-]+)_")
+ foreach(_source ${${_SOURCES_var}})
+ string(REGEX MATCH ${_regex} _matched "${_source}")
+ if(_matched)
+ string(REGEX REPLACE "${_regex}" "\\1" _basename "${_source}")
+ foreach(var _SOURCES _ADD_LIBS _COMPILE_FLAGS)
+ get_property(_prop GLOBAL PROPERTY DUNE_LIB_${_basename}${var})
+ list(APPEND _new${var} "${_prop}")
+ endforeach()
+ else()
+ list(APPEND _new_SOURCES "${_source}")
+ endif()
+ endforeach()
+
+ foreach(var _SOURCES _ADD_LIBS _COMPILE_FLAGS)
+ set(${${var}_var} "${_new${var}}" PARENT_SCOPE)
+ endforeach()
+endfunction(dune_expand_object_libraries)
+
+# Creates shared and static libraries with the same basename.
+# More documentation can be found at the top of this file.
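+#
+# A usage sketch (target, source and flag names are illustrative):
+#
+#   dune_add_library(dunefoo
+#     SOURCES foo.cc bar.cc
+#     ADD_LIBS ${DUNE_LIBS}
+#     COMPILE_FLAGS "-DFOO_SOME_FLAG")
+#
+# With the OBJECT option the sources, libraries and flags are only recorded in
+# global properties; they can later be folded into another library by listing
+# the placeholder source _DUNE_TARGET_OBJECTS:<basename>_ in that library's sources.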
+macro(dune_add_library basename)
+ include(CMakeParseArguments)
+ cmake_parse_arguments(DUNE_LIB "APPEND;NO_EXPORT;OBJECT" "COMPILE_FLAGS"
+ "ADD_LIBS;SOURCES" ${ARGN})
+ list(APPEND DUNE_LIB_SOURCES ${DUNE_LIB_UNPARSED_ARGUMENTS})
+ if(DUNE_LIB_OBJECT)
+ if(DUNE_LIB_${basename}_SOURCES)
+ message(FATAL_ERROR "There is already a library with the name ${basename}, "
+ "but only one is allowed!")
+ else()
+ foreach(source ${DUNE_LIB_SOURCES})
+ list(APPEND full_path_sources ${CMAKE_CURRENT_SOURCE_DIR}/${source})
+ endforeach()
+ # register sources, libs and flags for building the library later
+ define_property(GLOBAL PROPERTY DUNE_LIB_${basename}_SOURCES
+ BRIEF_DOCS "Convenience property with sources for library ${basename}. DO NOT EDIT!"
+ FULL_DOCS "Convenience property with sources for library ${basename}. DO NOT EDIT!")
+ set_property(GLOBAL PROPERTY DUNE_LIB_${basename}_SOURCES
+ "${full_path_sources}")
+ define_property(GLOBAL PROPERTY DUNE_LIB_${basename}_ADD_LIBS
+ BRIEF_DOCS "Convenience property with libraries for library ${basename}. DO NOT EDIT!"
+ FULL_DOCS "Convenience property with libraries for library ${basename}. DO NOT EDIT!")
+ set_property(GLOBAL PROPERTY DUNE_LIB_${basename}_ADD_LIBS
+ "${DUNE_LIB_ADD_LIBS}")
+ define_property(GLOBAL PROPERTY DUNE_LIB_${basename}_COMPILE_FLAGS
+ BRIEF_DOCS "Convenience property with compile flags for library ${basename}. DO NOT EDIT!"
+ FULL_DOCS "Convenience property with compile flags for library ${basename}. DO NOT EDIT!")
+ set_property(GLOBAL PROPERTY DUNE_LIB_${basename}_COMPILE_FLAGS
+ "${DUNE_LIB_COMPILE_FLAGS}")
+ endif()
+ else(DUNE_LIB_OBJECT)
+ dune_expand_object_libraries(DUNE_LIB_SOURCES DUNE_LIB_ADD_LIBS DUNE_LIB_COMPILE_FLAGS)
+ #create lib
+ add_library(${basename} ${DUNE_LIB_SOURCES})
+ get_property(_prop GLOBAL PROPERTY DUNE_MODULE_LIBRARIES)
+ set_property(GLOBAL PROPERTY DUNE_MODULE_LIBRARIES ${_prop} ${basename})
+ # link with specified libraries.
+ if(DUNE_LIB_ADD_LIBS)
+ target_link_libraries(${basename} PUBLIC "${DUNE_LIB_ADD_LIBS}")
+ endif()
+ if(DUNE_LIB_COMPILE_FLAGS)
+ set_property(TARGET ${basename} APPEND_STRING PROPERTY COMPILE_FLAGS
+ "${DUNE_LIB_COMPILE_FLAGS}")
+ endif()
+ # Build library in ${PROJECT_BINARY_DIR}/lib
+ set_target_properties(${basename} PROPERTIES
+ LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib"
+ ARCHIVE_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib")
+
+ if(NOT DUNE_LIB_NO_EXPORT)
+ # The following allows for adding multiple libs in the same
+ # directory or below by passing the APPEND keyword.
+ # If there are additional calls to dune_add_library in other
+ # modules then you have to use APPEND or otherwise only the
+ # last lib will get exported as a target.
+ if(NOT _MODULE_EXPORT_USED)
+ set(_MODULE_EXPORT_USED ON)
+ set(_append "")
+ else()
+ set(_append APPEND)
+ endif()
+ # Allow to explicitly pass APPEND
+ if(DUNE_LIB_APPEND)
+ set(_append APPEND)
+ endif()
+
+ # install targets to use the libraries in other modules.
+ install(TARGETS ${basename}
+ EXPORT ${ProjectName}-targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
+ install(EXPORT ${ProjectName}-targets
+ DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${ProjectName})
+
+ # export libraries for use in build tree
+ export(TARGETS ${basename} ${_append}
+ FILE ${PROJECT_BINARY_DIR}/${ProjectName}-targets.cmake)
+ endif()
+ endif()
+endmacro(dune_add_library basename sources)
+
+macro(replace_properties_for_one)
+ get_property(properties ${option_command} ${_target}
+ PROPERTY ${REPLACE_PROPERTY})
+ if(NOT properties)
+ # property not set. set it directly
+ foreach(i RANGE 0 ${hlength})
+ math(EXPR idx "(2 * ${i}) + 1")
+ list(GET REPLACE_UNPARSED_ARGUMENTS ${idx} repl)
+ list(APPEND replacement ${repl})
+ endforeach()
+ list(REMOVE_DUPLICATES replacement)
+ set_property(${option_command} ${_target} ${REPLACE_APPEND}
+ ${REPLACE_APPEND_STRING} PROPERTY ${REPLACE_PROPERTY} ${replacement})
+ else()
+ foreach(prop ${properties})
+ set(matched FALSE)
+ foreach(i RANGE 0 ${hlength})
+ math(EXPR regexi "2 * ${i}")
+ math(EXPR repli "${regexi} +1")
+ list(GET REPLACE_UNPARSED_ARGUMENTS ${regexi} regex)
+ list(GET REPLACE_UNPARSED_ARGUMENTS ${repli} replacement)
+ string(REGEX MATCH ${regex} match ${prop})
+
+ if(match)
+ list(APPEND new_props ${replacement})
+ set(matched TRUE)
+ endif()
+ endforeach()
+
+ if(NOT matched)
+ list(APPEND new_props ${prop})
+ endif()
+ endforeach()
+ list(REMOVE_DUPLICATES new_props)
+ set_property(${option_command} ${_target}
+ PROPERTY ${REPLACE_PROPERTY} ${new_props})
+ endif()
+ get_property(properties ${option_command} ${_target} PROPERTY ${REPLACE_PROPERTY})
+endmacro(replace_properties_for_one)
+
+function(dune_target_link_libraries basename libraries)
+ target_link_libraries(${basename} PUBLIC ${libraries})
+endfunction(dune_target_link_libraries basename libraries)
+
+function(replace_properties)
+ include(CMakeParseArguments)
+ set(_first_opts "GLOBAL;DIRECTORY;TARGET;SOURCE;CACHE")
+ cmake_parse_arguments(REPLACE "GLOBAL"
+ "DIRECTORY;PROPERTY" "TARGET;SOURCE;TEST;CACHE" ${ARGN})
+
+ set(MY_DIRECTORY TRUE)
+ foreach(i ${_first_opts})
+ if(REPLACE_${i})
+ set(MY_DIRECTORY FALSE)
+ endif()
+ endforeach()
+ if(NOT MY_DIRECTORY)
+ list(FIND REPLACE_UNPARSED_ARGUMENTS DIRECTORY _found)
+ if(_found GREATER -1)
+ list(REMOVE_AT REPLACE_UNPARSED_ARGUMENTS ${_found})
+ set(MY_DIRECTORY TRUE)
+ set(REPLACE_DIRECTORY "")
+ endif()
+ endif()
+
+ # setup options
+ if(REPLACE_GLOBAL)
+ set(option_command GLOBAL)
+ elseif(MY_DIRECTORY)
+ set(option_command DIRECTORY)
+ elseif(REPLACE_DIRECTORY)
+ set(option_command DIRECTORY)
+ set(option_arg ${REPLACE_DIRECTORY})
+ elseif(REPLACE_TARGET)
+ set(option_command TARGET)
+ set(option_arg ${REPLACE_TARGET})
+ elseif(REPLACE_SOURCE)
+ set(option_command SOURCE)
+ set(option_arg ${REPLACE_SOURCE})
+ elseif(REPLACE_TEST)
+ set(option_command TEST)
+ set(option_arg ${REPLACE_TEST})
+ elseif(REPLACE_CACHE)
+ set(option_command CACHE)
+ set(option_arg ${REPLACE_CACHE})
+ endif()
+
+ if(NOT (REPLACE_CACHE OR REPLACE_TEST OR REPLACE_SOURCE
+ OR REPLACE_TARGET OR REPLACE_DIRECTORY OR REPLACE_GLOBAL
+ OR MY_DIRECTORY))
+ message(ERROR "One of GLOBAL, DIRECTORY, TARGET, SOURCE, TEST, or CACHE"
+ " has to be present")
+ endif()
+
+ list(LENGTH REPLACE_UNPARSED_ARGUMENTS length)
+# if(NOT (REPLACE_GLOBAL AND REPLACE_TARGET AND
+# REPLACE_SOURCE AND REPLACE
+ math(EXPR mlength "${length} % 2 ")
+ math(EXPR hlength "${length} / 2 - 1")
+
+ if(NOT ${mlength} EQUAL 0)
+ message(ERROR "You need to specify pairs consisting of a regular expression and a replacement string.")
+ endif()
+
+ if(NOT length GREATER 0)
+ message(ERROR "You need to specify at least on pair consisting of a regular expression
+and a replacement string. ${REPLACE_UNPARSED_ARGUMENTS}")
+ endif()
+
+ foreach(_target ${option_arg})
+ replace_properties_for_one()
+ endforeach()
+
+ list(LENGTH option_arg _length)
+ if(_length EQUAL 0)
+ replace_properties_for_one()
+ endif()
+endfunction(replace_properties)
+
+macro(add_dune_all_flags targets)
+ get_property(incs GLOBAL PROPERTY ALL_PKG_INCS)
+ get_property(defs GLOBAL PROPERTY ALL_PKG_DEFS)
+ get_property(libs GLOBAL PROPERTY ALL_PKG_LIBS)
+ get_property(opts GLOBAL PROPERTY ALL_PKG_OPTS)
+ foreach(target ${targets})
+ set_property(TARGET ${target} APPEND PROPERTY INCLUDE_DIRECTORIES ${incs})
+ set_property(TARGET ${target} APPEND PROPERTY COMPILE_DEFINITIONS ${defs})
+ target_link_libraries(${target} PUBLIC ${DUNE_LIBS} ${libs})
+ target_compile_options(${target} PUBLIC ${opts})
+ endforeach()
+endmacro(add_dune_all_flags targets)
--- /dev/null
+# Some helper functions for people developing the CMake build system
+# to get quick and easy access to path variables of Dune modules.
+#
+# .. cmake_function:: dune_module_path
+#
+# .. cmake_param:: MODULE
+# :single:
+# :required:
+#
+# The name of the module.
+#
+# .. cmake_param:: RESULT
+# :single:
+# :required:
+#
+# The name of the variable to export the result.
+#
+# .. cmake_param:: CMAKE_MODULES
+# :option:
+#
+# Set to return the path to cmake modules
+#
+# .. cmake_param:: BUILD_DIR
+# :option:
+#
+# Set to return the path to the build directory
+#
+# .. cmake_param:: SOURCE_DIR
+# :option:
+#
+# Set to return the include path of the module
+#
+# .. cmake_param:: SCRIPT_DIR
+# :option:
+#
+# Set to return the CMake script dir
+#
+#
+# Returns the specified path of the given module. The result differs
+# depending on whether it is called from the module itself or from a module
+# requiring or suggesting this module. One and only one type of path
+# may be requested.
+#
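+# Example, as used further down in this build system to locate the scripts
+# shipped by dune-common:
+#
+#   dune_module_path(MODULE dune-common
+#                    RESULT scriptdir
+#                    SCRIPT_DIR)
+#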
+include_guard(GLOBAL)
+
+function(dune_module_path)
+ # Parse Arguments
+ set(OPTION CMAKE_MODULES BUILD_DIR SOURCE_DIR SCRIPT_DIR)
+ set(SINGLE MODULE RESULT)
+ set(MULTI)
+ include(CMakeParseArguments)
+ cmake_parse_arguments(PATH "${OPTION}" "${SINGLE}" "${MULTI}" ${ARGN})
+ if(PATH_UNPARSED_ARGUMENTS)
+ message(WARNING "Unparsed arguments in dune_module_path: This often indicates typos!")
+ endif()
+
+ # Check whether one and only one path type was set.
+ set(OPTION_FOUND 0)
+ foreach(opt ${OPTION})
+ if(${PATH_${opt}})
+ if(OPTION_FOUND)
+ message(FATAL_ERROR "Cannot request two different paths from dune_module_path")
+ else()
+ set(OPTION_FOUND 1)
+ endif()
+ endif()
+ endforeach()
+ if(NOT OPTION_FOUND)
+ message(FATAL_ERROR "Cannot determine type of requested path!")
+ endif()
+
+ # Set the requested paths for the cmake module path
+ if(PATH_CMAKE_MODULES)
+ set(IF_CURRENT_MOD ${PROJECT_SOURCE_DIR}/cmake/modules)
+ set(IF_NOT_CURRENT_MOD ${${PATH_MODULE}_MODULE_PATH})
+ endif()
+
+ # Set the requested paths for the cmake script path
+ if(PATH_SCRIPT_DIR)
+ set(IF_CURRENT_MOD ${PROJECT_SOURCE_DIR}/cmake/scripts)
+ set(IF_NOT_CURRENT_MOD ${${PATH_MODULE}_SCRIPT_DIR})
+ endif()
+
+ # Set the requested paths for the build directory
+ if(PATH_BUILD_DIR)
+ set(IF_CURRENT_MOD ${PROJECT_BINARY_DIR})
+ set(IF_NOT_CURRENT_MOD ${${PATH_MODULE}_DIR})
+ endif()
+
+ # Set the requested paths for the include directory
+ if(PATH_SOURCE_DIR)
+ set(IF_CURRENT_MOD ${PROJECT_SOURCE_DIR})
+ set(IF_NOT_CURRENT_MOD ${${PATH_MODULE}_PREFIX})
+ endif()
+
+ # Now set the path in the outer scope!
+ if(PROJECT_NAME STREQUAL ${PATH_MODULE})
+ set(${PATH_RESULT} ${IF_CURRENT_MOD} PARENT_SCOPE)
+ else()
+ set(${PATH_RESULT} ${IF_NOT_CURRENT_MOD} PARENT_SCOPE)
+ endif()
+endfunction()
--- /dev/null
+# searches for pkg-config, creates the
+# file <module-name>.pc from <module-name>.pc.in,
+# and adds installation directives.
+#
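+# The typical call, as issued by finalize_dune_project in DuneMacros.cmake:
+#
+#   create_and_install_pkconfig(${DUNE_INSTALL_LIBDIR})
+#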
+include_guard(GLOBAL)
+
+find_package(PkgConfig)
+# text for feature summary
+set_package_properties("PkgConfig" PROPERTIES
+ DESCRIPTION "Unified interface for querying installed libraries"
+ PURPOSE "To find Dune module dependencies")
+
+function(create_and_install_pkconfig installlibdir)
+ # set some variables that are used in the pkg-config file
+ include(GNUInstallDirs)
+
+ if(SKBUILD)
+ # we are using scikit-build to build a python wheel. The install prefix
+ # set by scikit is within a tmp directory (isolated build) and
+ # therefore not suitable for the prefix in the pc file. At least when
+ # installed into a virtual env, the correct prefix path is two directory
+ # levels above the location of the pc file, i.e.,
+ # location of pc files: dune-env/lib/pkgconfig
+ # location of dune.module files: dune-env/lib/dunecontrol
+ # and from the documentation
+ # installed module: ${path}/lib/dunecontrol/${name}/dune.module
+ # and there is a file ${path}/lib/pkgconfig/${name}.pc
+ set( prefix "\${pcfiledir}/../..")
+ else()
+ set( prefix ${CMAKE_INSTALL_PREFIX})
+ endif()
+
+ set(exec_prefix "\${prefix}")
+ set(libdir "\${exec_prefix}/${installlibdir}")
+ set(includedir "\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}")
+ set(PACKAGE_NAME ${ProjectName})
+ set(VERSION ${ProjectVersion})
+ set(CC ${CMAKE_C_COMPILER})
+ set(CXX "${CMAKE_CXX_COMPILER} ${CXX_STD11_FLAGS}")
+
+ if(DUNE_DEPENDS)
+ foreach(_DUNE_DEPEND ${DUNE_DEPENDS})
+ string(REGEX REPLACE "\\(" "" REQF1 ${_DUNE_DEPEND})
+ string(REGEX REPLACE "\\)" "" LR ${REQF1})
+ if(REQUIRES)
+ set(REQUIRES "${REQUIRES} ${LR}")
+ else()
+ set(REQUIRES ${LR})
+ endif(REQUIRES)
+ endforeach(_DUNE_DEPEND ${DUNE_DEPENDS})
+ endif(DUNE_DEPENDS)
+
+ #create pkg-config file
+ configure_file(
+ ${PROJECT_SOURCE_DIR}/${ProjectName}.pc.in
+ ${PROJECT_BINARY_DIR}/${ProjectName}.pc
+ @ONLY
+ )
+
+ # install pkgconfig file
+ install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${ProjectName}.pc
+ DESTINATION ${installlibdir}/pkgconfig)
+
+endfunction(create_and_install_pkconfig)
--- /dev/null
+# The python extension of the Dune cmake build system
+#
+# .. cmake_module::
+#
+# This module is the main entry point for the python extension of the Dune cmake
+# build system. It handles the detection of the python installation, defines installation
+# rules for python packages in Dune modules and provides virtual environments to
+# run python code from cmake.
+#
+# If you want to use Dune modules that provide Python functionality, you should be aware
+# of some facts:
+#
+# * CMake looks for your python interpreter during configure. If you want to have it
+# work with a virtual environment, you should activate your virtualenv before configure.
+# * Each module has an additional target :code:`make install_python` that installs the python packages
+# defined in the Dune module. You can customize the install location with
+# :ref:`DUNE_PYTHON_INSTALL_LOCATION`. This is also included in :code:`make install`.
+# * There is additional functionality that automatically sets up a virtual environment
+# at configure time; you can read more at :ref:`DunePythonVirtualenv`.
+#
+# After the module :code:`DunePythonCommonMacros` is run (which happens automatically when
+# configuring dune-common) the following python-related variables will be set and available
+# for use in downstream modules:
+#
+# * All variables set by :code:`FindPythonInterp.cmake` and :code:`FindPythonLibs.cmake`
+# * :code:`DUNE_PYTHON_SYSTEM_IS_VIRTUALENV`: True if the given system interpreter resides in
+# a virtual environment.
+#
+# For documentation on how to customize the build process, check the input variable
+# reference for any variables prefixed with :code:`DUNE_PYTHON`. To learn how to write build
+# system code for Dune modules shipping python, have a look at the command reference for
+# commands prefixed :code:`dune_python`.
+#
+# .. cmake_variable:: DUNE_PYTHON_INSTALL_LOCATION
+#
+# This variable can be used to control where Dune should install python
+# packages. Possible values are:
+#
+# * :code:`user`: installs into the user's home directory through :code:`pip --user`. Note that
+# this is incompatible with using virtual environments (as per pip docs).
+# * :code:`system`: into the standard paths of the interpreter which was found
+# by cmake.
+# * :code:`none`: Never install any python packages.
+#
+# The default value depends on whether the system interpreter runs in a virtual environment
+# or not: if it does, :code:`system` is the default; if it does not, :code:`none` is the default.
+# This rather unintuitive default originates from the strong belief that installing
+# python packages into the system locations at :code:`/usr/...` should be discouraged.
+#
+# .. cmake_variable:: DUNE_PYTHON_VIRTUALENV_SETUP
+#
+# Set this variable to allow the Dune build system to set up a virtualenv at
+# configure time. Such a virtual environment is very useful whenever python code
+# is to be run at configure time, e.g. to implement code generation in Python or
+# to use Python wrappers in testing. Some downstream modules will *require* you
+# to set this variable. When setting this variable, you allow the Dune build system
+# to install packages through :code:`pip` into a virtualenv that resides in a cmake
+# build directory. For all the information on this virtualenv, see :ref:`DunePythonVirtualenv`.
+#
+# .. cmake_function:: dune_python_require_virtualenv_setup
+#
+# Call this function from a downstream module, if that module relies on
+# the presence of the configure-time virtualenv described in :ref:`DunePythonVirtualenv`.
+#
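+# A configure-line sketch enabling both the configure-time virtualenv and the
+# installation of python packages (the chosen values are illustrative):
+#
+#   cmake -DDUNE_PYTHON_VIRTUALENV_SETUP=1 \
+#         -DDUNE_PYTHON_INSTALL_LOCATION=system \
+#         <path-to-module-source>
+#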
+include_guard(GLOBAL)
+
+# unless the user has defined the variable, unversioned names (like python3) are found
+# first, to match what users most probably use later on to call the executable
+if(NOT DEFINED Python3_FIND_UNVERSIONED_NAMES)
+ set(Python3_FIND_UNVERSIONED_NAMES "FIRST")
+endif()
+
+# include code from CMake 3.20 to back-port using unversioned Python first
+if(${CMAKE_VERSION} VERSION_LESS "3.20")
+ list(INSERT CMAKE_MODULE_PATH 0 "${CMAKE_CURRENT_LIST_DIR}/FindPython3")
+endif()
+
+# Include all the other parts of the python extension so that users do not
+# need to explicitly include parts of our build system.
+include(DunePythonFindPackage)
+include(DunePythonInstallPackage)
+include(DunePythonTestCommand)
+
+# Find the Python Interpreter and libraries
+find_package(Python3 COMPONENTS Interpreter Development)
+
+
+# Determine whether the given interpreter is running inside a virtualenv
+if(Python3_Interpreter_FOUND)
+ include(DuneExecuteProcess)
+ include(DunePathHelper)
+ dune_module_path(MODULE dune-common
+ RESULT scriptdir
+ SCRIPT_DIR)
+
+ dune_execute_process(COMMAND "${Python3_EXECUTABLE}" "${scriptdir}/envdetect.py"
+ RESULT_VARIABLE DUNE_PYTHON_SYSTEM_IS_VIRTUALENV
+ )
+endif()
+
+# Determine where to install python packages
+if(NOT DUNE_PYTHON_INSTALL_LOCATION)
+ if(DUNE_PYTHON_SYSTEM_IS_VIRTUALENV)
+ set(DUNE_PYTHON_INSTALL_LOCATION "system")
+ else()
+ set(DUNE_PYTHON_INSTALL_LOCATION "none")
+ endif()
+endif()
+if(NOT(("${DUNE_PYTHON_INSTALL_LOCATION}" STREQUAL "user") OR
+ ("${DUNE_PYTHON_INSTALL_LOCATION}" STREQUAL "system") OR
+ ("${DUNE_PYTHON_INSTALL_LOCATION}" STREQUAL "none")))
+ message(FATAL_ERROR "DUNE_PYTHON_INSTALL_LOCATION must be user|system|none.")
+endif()
+if(("${DUNE_PYTHON_INSTALL_LOCATION}" STREQUAL "user") AND
+ DUNE_PYTHON_SYSTEM_IS_VIRTUALENV)
+ message(FATAL_ERROR "Specifying 'user' as install location is incomaptible with using virtual environments (as per pip docs)")
+endif()
+
+# Check presence of python packages required by the buildsystem
+dune_python_find_package(PACKAGE pip)
+
+# Add python related meta targets
+add_custom_target(test_python)
+add_custom_target(install_python)
+
+# Set the path to a Dune wheelhouse that is to be used during installation
+# NB: Right now, the same logic is used to retrieve the location of the
+# wheelhouse (which means that you have to use the same CMAKE_INSTALL_PREFIX
+# when *using* installed modules as you used when *installing* them).
+# TODO: Replace this with a better mechanism (like writing the location into
+# dune-commons package config file)
+set(DUNE_PYTHON_WHEELHOUSE ${CMAKE_INSTALL_PREFIX}/share/dune/wheelhouse)
+
+# Have make install do the same as make install_python
+install(CODE "set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH})
+ set(DUNE_PYTHON_WHEELHOUSE ${DUNE_PYTHON_WHEELHOUSE})
+ include(DuneExecuteProcess)
+ dune_execute_process(COMMAND \"${CMAKE_COMMAND}\" --build . --target install_python --config $<CONFIG>)
+ ")
+
+# Implement a check for the presence of the virtualenv
+function(dune_python_require_virtualenv_setup)
+ if(NOT DUNE_PYTHON_VIRTUALENV_SETUP)
+ message(FATAL_ERROR "\n
+ ${PROJECT_NAME} relies on a configure-time virtual environment being
+ set up by the Dune python build system. You have to set the CMake variable
+ DUNE_PYTHON_VIRTUALENV_SETUP to allow that.\n
+ ")
+ endif()
+endfunction()
+
+# If requested, switch into DunePythonVirtualenv.cmake and setup the virtualenv.
+if(DUNE_PYTHON_VIRTUALENV_SETUP)
+ include(DunePythonVirtualenv)
+endif()
+
+# macros used for the Python bindings
+include(DunePythonMacros)
--- /dev/null
+# This module provides functions to check for the existence of python packages on the host system.
+#
+# .. cmake_function:: dune_python_find_package
+#
+# .. cmake_param:: PACKAGE
+# :required:
+# :single:
+#
+# The package name to look for.
+#
+# .. cmake_param:: RESULT
+# :single:
+#
+# The variable to store the result of the check
+# in the calling scope. Defaults to :code:`DUNE_PYTHON_<package>_FOUND`.
+# Note that the package name is case sensitive and will
+# usually be lowercase.
+#
+# .. cmake_param:: REQUIRED
+# :option:
+#
+# If set, the function will error out if the package is not
+# found.
+#
+# .. cmake_param:: VERSION
+# :single:
+#
+# The minimum version of the package that is required.
+#
+# .. cmake_param:: EXACT
+# :option:
+#
+# Whether the given version requirement has to be matched exactly.
+#
+# .. cmake_param:: INTERPRETER
+# :single:
+#
+# The python interpreter, whose paths are searched for the package.
+# Defaults to :code:`${Python3_EXECUTABLE}`, might differ when dealing with
+# the configure-time virtualenv set up with :ref:`DUNE_PYTHON_VIRTUALENV_SETUP`.
+#
+# Find a given python package on the system.
+#
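+# A usage sketch (the package name and version are illustrative):
+#
+#   dune_python_find_package(PACKAGE numpy
+#                            VERSION 1.13
+#                            REQUIRED)
+#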
+include_guard(GLOBAL)
+
+function(dune_python_find_package)
+ # Parse Arguments
+ set(OPTION REQUIRED EXACT)
+ set(SINGLE PACKAGE RESULT VERSION INTERPRETER)
+ set(MULTI)
+ include(CMakeParseArguments)
+ cmake_parse_arguments(PYPACKAGE "${OPTION}" "${SINGLE}" "${MULTI}" ${ARGN})
+ if(PYPACKAGE_UNPARSED_ARGUMENTS)
+ message(WARNING "Unparsed arguments in dune_python_find_package: This often indicates typos!")
+ endif()
+
+ # Do error checking on input and apply defaults
+ if(NOT PYPACKAGE_RESULT)
+ set(PYPACKAGE_RESULT DUNE_PYTHON_${PYPACKAGE_PACKAGE}_FOUND)
+ endif()
+ if(NOT PYPACKAGE_INTERPRETER)
+ set(PYPACKAGE_INTERPRETER "${Python3_EXECUTABLE}")
+ endif()
+ if(PYPACKAGE_EXACT AND NOT PYPACKAGE_VERSION)
+ message(FATAL_ERROR "dune_python_find_package: EXACT given, but no VERSION specified.")
+ endif()
+
+ # Do the actual check
+ execute_process(COMMAND "${PYPACKAGE_INTERPRETER}" -c "import ${PYPACKAGE_PACKAGE}"
+ RESULT_VARIABLE PYPACKAGE_RETURN
+ ERROR_QUIET)
+
+ # Perform additional checks
+ if(PYPACKAGE_RETURN STREQUAL "0")
+ include(DunePathHelper)
+ dune_module_path(MODULE dune-common
+ RESULT scriptdir
+ SCRIPT_DIR)
+
+ # Check the found version of the given python package
+ # We cannot use find_package_handle_standard_args for that, as it is too
+ # closely tied to using find_package(), which we cannot use for variable package
+ # name...
+ execute_process(COMMAND "${PYPACKAGE_INTERPRETER}" "${scriptdir}/pyversion.py" "${PYPACKAGE_PACKAGE}"
+ RESULT_VARIABLE retcode
+ OUTPUT_VARIABLE VERSION_STRING
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+ )
+
+ set(${PYPACKAGE_RESULT} TRUE)
+ if("${retcode}" STREQUAL "0")
+ if(("${VERSION_STRING}" VERSION_LESS "${PYPACKAGE_VERSION}") OR
+ (PYPACKAGE_EXACT AND NOT ("${VERSION_STRING}" VERSION_EQUAL "${PYPACKAGE_VERSION}")))
+ set(${PYPACKAGE_RESULT} FALSE)
+ endif()
+ else()
+ set(VERSION_STRING "unknown version")
+ if(PYPACKAGE_VERSION)
+ set(${PYPACKAGE_RESULT} FALSE)
+ endif()
+ endif()
+ else()
+ set(${PYPACKAGE_RESULT} FALSE)
+ if(PYPACKAGE_REQUIRED)
+ message(FATAL_ERROR "The python package ${PYPACKAGE_PACKAGE} could not be found! (for interpreter ${PYPACKAGE_INTERPRETER})")
+ endif()
+ endif()
+
+ # Set the result variable and print the result
+ include(FindPackageHandleStandardArgs)
+ find_package_handle_standard_args(${PYPACKAGE_PACKAGE}_${PYPACKAGE_INTERPRETER}
+ "Failed to find the python package ${PYPACKAGE_PACKAGE} with interpreter ${PYPACKAGE_INTERPRETER}."
+ ${PYPACKAGE_RESULT}
+ )
+ set(${PYPACKAGE_RESULT} ${${PYPACKAGE_RESULT}} PARENT_SCOPE)
+endfunction()
--- /dev/null
+# This cmake module provides infrastructure for cmake installation rules concerning python packages.
+#
+# .. cmake_function:: dune_python_install_package
+#
+# .. cmake_param:: PATH
+# :required:
+# :single:
+#
+# Relative path to the given python package source code.
+#
+# .. cmake_param:: ADDITIONAL_PIP_PARAMS
+# :multi:
+# :argname: param
+#
+# Parameters to add to any :code:`pip install` call (appended).
+#
+# This function installs the python package located at the given path. It
+#
+# * installs it to the location specified with :ref:`DUNE_PYTHON_INSTALL_LOCATION` during
+# :code:`make install_python` and during :code:`make install`.
+# * installs a wheel into the Dune wheelhouse during :code:`make install`.
+# This is necessary for mixing installed and non-installed Dune modules.
+#
+# The package at the given location is expected to be a pip-installable package.
+#
+# .. cmake_variable:: DUNE_PYTHON_INSTALL_EDITABLE
+#
+# Set this variable to have all installations of python packages use
+# :code:`pip --editable`.
+#
+#
+# .. cmake_variable:: DUNE_PYTHON_ADDITIONAL_PIP_PARAMS
+#
+# Use this variable to set additional flags for pip in this build. This can e.g.
+# be used to point pip to alternative package indices in restricted environments.
+#
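+# A usage sketch (the relative path is illustrative; the directory must
+# contain a setup.py or setup.py.in):
+#
+#   dune_python_install_package(PATH python
+#                               ADDITIONAL_PIP_PARAMS "--verbose")
+#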
+include_guard(GLOBAL)
+
+function(dune_python_install_package)
+ # Parse Arguments
+ set(OPTION)
+ set(SINGLE PATH)
+ set(MULTI ADDITIONAL_PIP_PARAMS)
+ include(CMakeParseArguments)
+ cmake_parse_arguments(PYINST "${OPTION}" "${SINGLE}" "${MULTI}" ${ARGN})
+ if(PYINST_UNPARSED_ARGUMENTS)
+ message(WARNING "Unparsed arguments in dune_python_install_package: This often indicates typos!")
+ endif()
+
+ set(PYINST_FULLPATH ${CMAKE_CURRENT_SOURCE_DIR}/${PYINST_PATH})
+ if(EXISTS ${PYINST_FULLPATH}/setup.py.in)
+ configure_file(${PYINST_PATH}/setup.py.in ${PYINST_PATH}/setup.py)
+ set(PYINST_FULLPATH ${CMAKE_CURRENT_BINARY_DIR}/${PYINST_PATH})
+ set(PYINST_PUREPYTHON FALSE)
+ elseif(EXISTS ${PYINST_FULLPATH}/setup.py)
+ set(PYINST_PUREPYTHON TRUE)
+ else()
+ message(FATAL_ERROR "dune_python_install_package: Requested installations, but neither setup.py nor setup.py.in found!")
+ endif()
+
+ # Find out whether we should install in editable mode
+ set(INSTALL_EDITABLE ${DUNE_PYTHON_INSTALL_EDITABLE})
+
+ # Construct the wheel house installation option string
+ set(WHEEL_OPTION "")
+ if(IS_DIRECTORY ${DUNE_PYTHON_WHEELHOUSE})
+ set(WHEEL_OPTION "--find-links=${DUNE_PYTHON_WHEELHOUSE}")
+ #
+ # The following line is a bummer!
+ # We cannot have editable packages once we start using global installations!
+ # This is related to the nightmare that is https://github.com/pypa/pip/issues/3
+ #
+ set(INSTALL_EDITABLE FALSE)
+ endif()
+
+ # Construct the editable option string
+ set(EDIT_OPTION "")
+ if(INSTALL_EDITABLE)
+ set(EDIT_OPTION "-e")
+ endif()
+
+ # Construct the installation location option string
+ set(INSTALL_OPTION "")
+ if("${DUNE_PYTHON_INSTALL_LOCATION}" STREQUAL "user")
+ set(INSTALL_OPTION "--user")
+ endif()
+
+ set(INSTALL_CMDLINE -m pip install
+ "${INSTALL_OPTION}" --upgrade "${WHEEL_OPTION}" "${EDIT_OPTION}" ${PYINST_ADDITIONAL_PIP_PARAMS} ${DUNE_PYTHON_ADDITIONAL_PIP_PARAMS}
+ "${PYINST_FULLPATH}")
+
+
+ # Leave this function if no installation rules are required
+ if("${DUNE_PYTHON_INSTALL_LOCATION}" STREQUAL "none" AND NOT DUNE_PYTHON_VIRTUALENV_SETUP)
+ return()
+ endif()
+
+ # Check for the presence of the pip package
+ if(NOT DUNE_PYTHON_pip_FOUND)
+ message(FATAL_ERROR "dune_python_install_package: Requested installations, but pip was not found!")
+ endif()
+
+ #
+ # If requested, install into the configure-time Dune virtualenv
+ #
+
+ if(PYINST_PUREPYTHON AND DUNE_PYTHON_VIRTUALENV_SETUP)
+ message("-- Installing python package at ${CMAKE_CURRENT_SOURCE_DIR}/${PYINST_PATH} into the virtualenv...")
+ dune_execute_process(COMMAND "${DUNE_PYTHON_VIRTUALENV_EXECUTABLE}" "${INSTALL_CMDLINE}"
+ ERROR_MESSAGE "dune_python_install_package: Error installing into virtualenv!")
+ endif()
+
+ #
+ # Now define rules for `make install_python`.
+ #
+
+ # Leave this function if no installation rules are required
+ if("${DUNE_PYTHON_INSTALL_LOCATION}" STREQUAL "none")
+ return()
+ endif()
+
+ dune_module_path(MODULE dune-common
+ RESULT scriptdir
+ SCRIPT_DIR)
+
+ # Determine a target name for installing this package
+ string(REPLACE "/" "_" targetname "install_python_${CMAKE_CURRENT_SOURCE_DIR}_${PYINST_PATH}")
+
+ # Add a custom target that globally installs this package if requested
+ add_custom_target(${targetname}
+ COMMAND ${Python3_EXECUTABLE} ${INSTALL_CMDLINE}
+ COMMENT "Installing the python package at ${PYINST_FULLPATH}"
+ )
+
+ add_dependencies(install_python ${targetname})
+
+ # Define rules for `make install` that install a wheel into a central wheelhouse
+ #
+ # NB: This is necessary to allow mixing installed and non-installed modules
+ # with python packages. The wheelhouse allows installing any missing
+ # python packages into a virtual environment.
+ #
+
+ # Construct the wheel installation commandline
+ set(WHEEL_COMMAND ${Python3_EXECUTABLE} -m pip wheel -w ${DUNE_PYTHON_WHEELHOUSE} ${PYINST_ADDITIONAL_PIP_PARAMS} ${DUNE_PYTHON_ADDITIONAL_PIP_PARAMS} ${PYINST_FULLPATH})
+
+ # Add the installation rule
+ install(CODE "message(\"Installing wheel for python package at ${PYINST_FULLPATH}...\")
+ dune_execute_process(COMMAND ${WHEEL_COMMAND}
+ )"
+ )
+endfunction()
--- /dev/null
+# this option enables the build of Python bindings for DUNE modules
+option(DUNE_ENABLE_PYTHONBINDINGS "Enable Python bindings for DUNE" OFF)
+
+if( DUNE_ENABLE_PYTHONBINDINGS )
+ if(NOT Python3_Interpreter_FOUND)
+ message(FATAL_ERROR "Python bindings require a Python 3 interpreter")
+ endif()
+ if(NOT Python3_INCLUDE_DIRS)
+ message(FATAL_ERROR "Found a Python interpreter but the Python bindings also requires the Python libraries (a package named like python-dev package or python3-devel)")
+ endif()
+
+ include_directories("${Python3_INCLUDE_DIRS}")
+
+ function(add_python_targets base)
+ include(DuneSymlinkOrCopy)
+ if(PROJECT_SOURCE_DIR STREQUAL PROJECT_BINARY_DIR)
+ message(WARNING "Source and binary dir are the same, skipping symlink!")
+ else()
+ foreach(file ${ARGN})
+ dune_symlink_to_source_files(FILES ${file}.py)
+ endforeach()
+ endif()
+ endfunction()
+
+ include(DuneAddPybind11Module)
+
+ # Add a custom command that triggers the configuration of dune-py
+ add_custom_command(TARGET install_python POST_BUILD
+ COMMAND ${Python3_EXECUTABLE} -m dune configure
+ )
+
+endif()
--- /dev/null
+# Wrap python testing commands into the CMake build system
+#
+# .. cmake_function:: dune_python_add_test
+#
+# .. cmake_param:: SCRIPT
+# :multi:
+#
+# The script to execute using the python interpreter. It will be executed during :code:`make test_python`
+# and during `ctest`. You are required to either pass SCRIPT or MODULE.
+#
+# .. note::
+#
+# The script will be executed using :code:`${Python3_EXECUTABLE} SCRIPT`. If the INTERPRETER
+# option is given, that interpreter is used instead.
+#
+# .. cmake_param:: MODULE
+# :multi:
+#
+# The Python module to be executed. It will be executed during :code:`make test_python`
+# and during `ctest`. You are required to either pass SCRIPT or MODULE.
+#
+# .. note::
+#
+# The script will be executed using :code:`${Python3_EXECUTABLE} -m MODULE`. If the INTERPRETER
+# option is given, that interpreter is used instead.
+#
+# .. cmake_param:: INTERPRETER
+# :single:
+#
+# The Python interpreter to use for this test. It defaults to the one found by CMake.
+#
+# .. cmake_param:: WORKING_DIRECTORY
+# :single:
+# :argname: dir
+#
+# The working directory of the command. Defaults to
+# the current build directory.
+#
+# .. cmake_param:: NAME
+# :single:
+#
+# A name to identify this test in ctest. Names must be unique throughout
+# the project. If omitted, a name is generated by mangling the command.
+#
+# Integrates a python testing framework command into the Dune
+# build system. Added commands are run, when the target
+# :code:`test_python` is built and during :code:`ctest`.
+#
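+# A usage sketch (names are illustrative):
+#
+#   dune_python_add_test(NAME python_foo_test
+#                        SCRIPT test_foo.py
+#                        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+#                        LABELS quick)
+#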
+include_guard(GLOBAL)
+
+function(dune_python_add_test)
+ # Parse Arguments
+ include(CMakeParseArguments)
+ set(OPTION)
+ set(SINGLE WORKING_DIRECTORY NAME INTERPRETER)
+ set(MULTI SCRIPT COMMAND LABELS MODULE)
+
+ cmake_parse_arguments(PYTEST "${OPTION}" "${SINGLE}" "${MULTI}" ${ARGN})
+ if(PYTEST_COMMAND)
+ message(FATAL_ERROR "dune_python_add_test: COMMAND argument should not be used, use SCRIPT instead providing only the Python script and not the Python interpreter")
+ endif()
+ if(PYTEST_UNPARSED_ARGUMENTS)
+ message(WARNING "Unparsed arguments in dune_python_add_test: This often indicates typos!")
+ endif()
+
+ # Apply defaults
+ if(NOT PYTEST_INTERPRETER)
+ set(PYTEST_INTERPRETER ${Python3_EXECUTABLE})
+ endif()
+ if(NOT PYTEST_WORKING_DIRECTORY)
+ set(PYTEST_WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+ endif()
+ if((NOT PYTEST_MODULE) AND (NOT PYTEST_SCRIPT))
+ message(FATAL_ERROR "dune_python_add_test: Either SCRIPT or MODULE need to be specified!")
+ endif()
+ if(PYTEST_MODULE AND PYTEST_SCRIPT)
+ message(FATAL_ERROR "dune_python_add_test: You can only specify either SCRIPT or MODULE, not both!")
+ endif()
+ if(PYTEST_MODULE)
+ set(PYTEST_SCRIPT -m ${PYTEST_MODULE})
+ endif()
+ if(NOT PYTEST_NAME)
+ set(commandstr "")
+ foreach(comm ${PYTEST_SCRIPT})
+ set(commandstr "${commandstr}_${comm}")
+ endforeach()
+ set(commandstr "${commandstr}_${PYTEST_WORKING_DIRECTORY}")
+ string(REPLACE "/" "_" PYTEST_NAME ${commandstr})
+ endif()
+
+ # Actually run the command
+ add_custom_target(target_${PYTEST_NAME}
+ COMMAND ${PYTEST_INTERPRETER} ${PYTEST_SCRIPT}
+ WORKING_DIRECTORY ${PYTEST_WORKING_DIRECTORY})
+
+ # Build this during make test_python
+ add_dependencies(test_python target_${PYTEST_NAME})
+
+ # make sure each label exists and its name is acceptable
+ dune_declare_test_label(LABELS ${PYTEST_LABELS})
+ # Also build this during ctest
+ _add_test(NAME ${PYTEST_NAME}
+ COMMAND ${PYTEST_INTERPRETER} ${PYTEST_SCRIPT}
+ WORKING_DIRECTORY ${PYTEST_WORKING_DIRECTORY}
+ )
+ # Set the labels on the test
+ set_tests_properties(${PYTEST_NAME} PROPERTIES LABELS "${PYTEST_LABELS}")
+endfunction()
--- /dev/null
+# Manage the creation of a configure-time virtual environment
+#
+# .. cmake_module::
+#
+# This module manages the creation of virtual python environment during
+# configuration. Execution of this module must be explicitly enabled by
+# setting the variable :ref:`DUNE_PYTHON_VIRTUALENV_SETUP`. Note that some
+# downstream modules will require you to set this variable. The purpose
+# of this virtual environment is to be able to run python code from cmake
+# in situations such as python-based code generation, running postprocessing
+# in python during testing etc.
+#
+# Although designed for internal use, this virtualenv can also be manually
+# inspected. A symlink to the activation script is placed in the top level
+# build directory of all Dune modules in the stack. To directly execute a
+# command in the virtualenv, you can use the script :code:`run-in-dune-env <command>`,
+# which is also placed into every build directory.
+#
+# All packages installed with :ref:`dune_python_install_package` are automatically
+# installed into the virtualenv.
+#
+# After execution of this module, the following are available for use in
+# downstream modules:
+#
+# * :code:`DUNE_PYTHON_VIRTUALENV_PATH` The path of the virtual environment
+# * :code:`DUNE_PYTHON_VIRTUALENV_EXECUTABLE` The python interpreter in the virtual environment
+#
+# By default, the created virtualenv resides in the first non-installed Dune module of
+# the module stack (if no installation is performed: dune-common). Be aware
+# that mixing installed and non-installed modules may result in a situation
+# where multiple such environments are created, although only one should exist.
+# You can change this behavior by either specifying a fixed path for the virtualenv
+# using :ref:`DUNE_PYTHON_VIRTUALENV_PATH` or by enabling
+# :ref:`DUNE_PYTHON_EXTERNAL_VIRTUALENV_FOR_ABSOLUTE_BUILDDIR` if you are using an
+# absolute build directory with dunecontrol. Note that this flag is enabled by default
+# starting from Dune 2.7.
+#
+# .. cmake_variable:: DUNE_PYTHON_VIRTUALENV_PATH
+#
+# When the Dune build system has setup a virtualenv, this variable will contain its location.
+# You can also set this variable to a fixed path when running CMake, and the virtualenv will be placed
+# at that location.
+#
+# .. cmake_variable:: DUNE_PYTHON_EXTERNAL_VIRTUALENV_FOR_ABSOLUTE_BUILDDIR
+#
+# Before Dune 2.7, the virtualenv was always placed inside the build directory of the first
+# non-installed Dune module that the current module depends on. When using installed core modules
+# or a multi-stage installation process, this can lead to situations where there are multiple
+# virtualenvs, making it impossible to find all Python modules installed by upstream modules.
+# In order to avoid this problem at least for builds using an absolute build directory (i.e., the
+# :code:`--builddir` option of dunecontrol refers to an absolute path), the build system will
+# place the virtualenv in a dedicated directory :code:`dune-python-env` inside that absolute
+# build directory, where it will be found by all Dune modules. If you want to disable this
+# behavior, set :code:`DUNE_PYTHON_EXTERNAL_VIRTUALENV_FOR_ABSOLUTE_BUILDDIR=0`.
+#
+# .. cmake_variable:: DUNE_PYTHON_ALLOW_GET_PIP
+#
+# The Dune build system will try to build a virtualenv with pip installed into it,
+# but this can fail in some situations, in particular on Debian and Ubuntu distributions.
+# In this case, you will see a warning message in the CMake output. If you are on Debian
+# or Ubuntu, try installing the :code:`python3-venv` (for Python 3) and / or
+# :code:`python-virtualenv` packages, delete your build directory and try configuring
+# again.
+#
+# If that still does not help, set this variable to allow the Dune build system to download
+# :code:`get-pip.py` from https://bootstrap.pypa.io/get-pip.py at configure time and execute
+# it to install pip into the freshly set up virtual environment. While this should normally
+# not be necessary anymore, see https://bugs.launchpad.net/debian/+source/python3.4/+bug/1290847
+# for more information about the underlying distribution bug.
+#
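+# Example (a sketch of how a downstream module might run a hypothetical code
+# generator inside the virtualenv; the script name is an assumption):
+#
+# .. code-block:: cmake
+#
+#    add_custom_command(OUTPUT generated.hh
+#      COMMAND ${DUNE_PYTHON_VIRTUALENV_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/generate.py
+#      DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/generate.py)
+#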
+include_guard(GLOBAL)
+
+# If the user has not explicitly specified a path, we look through the dependency tree of this module
+# for a build directory that already contains a virtual environment.
+
+set(DUNE_PYTHON_VIRTUALENV_PATH "" CACHE PATH
+ "Location of Python virtualenv created by the Dune build system"
+ )
+
+# pre-populate DUNE_PYTHON_EXTERNAL_VIRTUALENV_FOR_ABSOLUTE_BUILDDIR
+set(DUNE_PYTHON_EXTERNAL_VIRTUALENV_FOR_ABSOLUTE_BUILDDIR ON CACHE BOOL
+ "Place Python virtualenv in top-level directory \"dune-python-env\" when using an absolute build directory"
+ )
+
+if(DUNE_PYTHON_VIRTUALENV_PATH STREQUAL "")
+ foreach(mod ${ALL_DEPENDENCIES})
+ if(IS_DIRECTORY ${${mod}_DIR}/dune-env)
+ set(DUNE_PYTHON_VIRTUALENV_PATH ${${mod}_DIR}/dune-env)
+ break()
+ endif()
+ endforeach()
+
+ # if we haven't found it yet, check in the current build directory - this might be a reconfigure
+ if(DUNE_PYTHON_VIRTUALENV_PATH STREQUAL "")
+ if(IS_DIRECTORY ${CMAKE_BINARY_DIR}/dune-env)
+ set(DUNE_PYTHON_VIRTUALENV_PATH ${CMAKE_BINARY_DIR}/dune-env)
+ endif()
+ endif()
+endif()
+
+
+if(DUNE_PYTHON_VIRTUALENV_PATH STREQUAL "")
+ # We didn't find anything, so figure out the correct location for building the virtualenv
+
+ if(DUNE_PYTHON_EXTERNAL_VIRTUALENV_FOR_ABSOLUTE_BUILDDIR AND DUNE_BUILD_DIRECTORY_ROOT_PATH)
+ # Use a dedicated directory not associated with any module
+ set(DUNE_PYTHON_VIRTUALENV_PATH "${DUNE_BUILD_DIRECTORY_ROOT_PATH}/dune-python-env")
+ else()
+ # Create the virtualenv inside our build directory
+ set(DUNE_PYTHON_VIRTUALENV_PATH ${CMAKE_BINARY_DIR}/dune-env)
+ endif()
+endif()
+
+# If it does not yet exist, set it up!
+if(NOT IS_DIRECTORY "${DUNE_PYTHON_VIRTUALENV_PATH}")
+ # Check for presence of the virtualenv/venv package
+ dune_python_find_package(PACKAGE virtualenv)
+ dune_python_find_package(PACKAGE venv)
+ if(NOT(DUNE_PYTHON_virtualenv_FOUND OR DUNE_PYTHON_venv_FOUND))
+ message(FATAL_ERROR "One of the python packages virtualenv/venv is needed on the host system!")
+ endif()
+
+ # Set some options depending on which virtualenv package is used
+ if(DUNE_PYTHON_venv_FOUND)
+ set(VIRTUALENV_PACKAGE_NAME venv)
+ set(NOPIP_OPTION --without-pip)
+ set(INTERPRETER_OPTION "")
+ endif()
+ if(DUNE_PYTHON_virtualenv_FOUND)
+ set(VIRTUALENV_PACKAGE_NAME virtualenv)
+ set(NOPIP_OPTION --no-pip)
+ set(INTERPRETER_OPTION -p "${Python3_EXECUTABLE}")
+ endif()
+
+ if(("${VIRTUALENV_PACKAGE_NAME}" STREQUAL "venv") AND DUNE_PYTHON_SYSTEM_IS_VIRTUALENV)
+ message("-- WARNING: You are using a system interpreter which is a virtualenv and the venv package.")
+ message(" You might want to consider installing the virtualenv package if you experience inconveniences.")
+ endif()
+
+ # Set up the env itself
+ message("-- Building a virtualenv in ${DUNE_PYTHON_VIRTUALENV_PATH}")
+ # First, try to build it with pip installed, but only if the user has not set DUNE_PYTHON_ALLOW_GET_PIP
+ if(NOT DUNE_PYTHON_ALLOW_GET_PIP)
+ dune_execute_process(COMMAND ${Python3_EXECUTABLE}
+ -m ${VIRTUALENV_PACKAGE_NAME}
+ ${INTERPRETER_OPTION}
+ "${DUNE_PYTHON_VIRTUALENV_PATH}"
+ RESULT_VARIABLE venv_install_result
+ )
+ endif()
+
+ if(NOT "${venv_install_result}" STREQUAL "0")
+
+ if(NOT DUNE_PYTHON_ALLOW_GET_PIP)
+ # we attempted the default installation before, so issue a warning
+ message("-- WARNING: Failed to build a virtual env with pip installed, trying again without pip")
+ message("-- If you are using Debian or Ubuntu, consider installing python3-venv and / or python-virtualenv")
+ endif()
+
+ # remove the remainder of a potential first attempt
+ file(REMOVE_RECURSE "${DUNE_PYTHON_VIRTUALENV_PATH}")
+
+ # try to build the env without pip
+ dune_execute_process(COMMAND ${Python3_EXECUTABLE}
+ -m ${VIRTUALENV_PACKAGE_NAME}
+ ${INTERPRETER_OPTION}
+ ${NOPIP_OPTION}
+ "${DUNE_PYTHON_VIRTUALENV_PATH}"
+ ERROR_MESSAGE "Fatal error when setting up a virtualenv."
+ )
+ endif()
+
+else()
+ message("-- Using existing virtualenv in ${DUNE_PYTHON_VIRTUALENV_PATH}")
+endif()
+
+# Also store the virtual env interpreter directly
+set(DUNE_PYTHON_VIRTUALENV_EXECUTABLE ${DUNE_PYTHON_VIRTUALENV_PATH}/bin/python)
+
+# Write a symlink for activation of the environment into all the
+# build directories of the Dune stack
+dune_execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${DUNE_PYTHON_VIRTUALENV_PATH}/bin/activate ${CMAKE_BINARY_DIR}/activate)
+
+# Also write a small wrapper script 'run-in-dune-env' into the build directory
+# This is necessary to execute installed python scripts (the bin path of a virtualenv
+# is *not* in the sys path, so a simple `python scriptname` does not work).
+if(UNIX)
+ find_package(UnixCommands QUIET)
+ dune_module_path(MODULE dune-common
+ RESULT scriptdir
+ SCRIPT_DIR)
+ configure_file(${scriptdir}/run-in-dune-env.sh.in
+ ${CMAKE_BINARY_DIR}/run-in-dune-env
+ @ONLY)
+else()
+ message(WARNING "Writing script 'run-in-dune-env' not implemented on your platform!")
+endif()
+
+# The virtualenv might not contain pip due to the distribution bug described in
+# https://bugs.launchpad.net/debian/+source/python3.4/+bug/1290847
+# We need to install pip, so if pip is missing, we offer to download and run the get-pip
+# script. We ask users for permission to do so, or we allow them to set it up themselves.
+
+dune_python_find_package(PACKAGE pip
+ RESULT pippresent
+ INTERPRETER ${DUNE_PYTHON_VIRTUALENV_EXECUTABLE}
+ )
+if(NOT pippresent)
+ if(DUNE_PYTHON_ALLOW_GET_PIP)
+ # Fetch the get-pip.py script
+ message("-- Installing pip using https://bootstrap.pypa.io/get-pip.py...")
+ file(DOWNLOAD https://bootstrap.pypa.io/get-pip.py ${CMAKE_CURRENT_BINARY_DIR}/get-pip.py)
+
+ # Verify that the script was successfully fetched
+ file(READ ${CMAKE_CURRENT_BINARY_DIR}/get-pip.py verify LIMIT 1)
+ if(NOT verify)
+ message(FATAL_ERROR "
+ Fetching get-pip.py failed. This often happens when CMake is built from source without SSL/TLS support.
+ Consider using a different cmake version or falling back to manually installing pip into the virtualenv.
+ ")
+ endif()
+
+ # Execute the script
+ dune_execute_process(COMMAND ${DUNE_PYTHON_VIRTUALENV_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/get-pip.py
+ ERROR_MESSAGE "Fatal error when installing pip into the virtualenv."
+ )
+ else()
+ message(FATAL_ERROR "dune-common set up a virtualenv, but needs pip to be installed into it.
+ You can either install it yourself manually activating the virtualenv with
+ the activate script in your build directory ${CMAKE_BINARY_DIR} or you set
+ the CMake variable DUNE_PYTHON_ALLOW_GET_PIP to allow Dune to use get-pip.py
+ from https://bootstrap.pypa.io/get-pip.py")
+ endif()
+endif()
--- /dev/null
+# Module to generate CMake API documentation with Sphinx
+#
+# .. cmake_function:: dune_cmake_sphinx_doc
+#
+# .. cmake_brief::
+#
+# Generate the documentation that you are just browsing!!!
+#
+# .. cmake_param:: BUILDTYPE
+# :multi:
+#
+# Set the type of build that is requested. By default, "html" is chosen.
+# The list of available build types:
+#
+# * `html`
+#
+# .. cmake_param:: SPHINX_CONF
+# :single:
+# :argname: conf
+#
+# A template for a conf file to be passed to :code:`sphinx-build`.
+# The real configuration file will be generated through CMake's
+# :code:`configure_file` mechanism. A reasonable default file is
+# provided by dune-common. Only use this if you want to create
+# custom documentation.
+#
+# .. cmake_param:: RST_SOURCES
+# :multi:
+# :argname: src
+#
+# A list of rst sources that should be configured into the build tree
+# (using :code:`configure_file`). If omitted, this defaults to
+# :code:`index.rst` and :code:`contents.rst` with suitable content.
+# Only use this if you want to create custom documentation.
+#
+# .. cmake_param:: MODULE_ONLY
+# :option:
+#
+# Only document CMake functionality from the current Dune module.
+#
+# Generate documentation for the CMake API. The set of cmake
+# modules defined by the parameters, and all functions and macros
+# therein, is documented automatically. The top-level directory
+# of the documentation is the current build directory (i.e. the
+# directory that this function is called from).
+#
+# There are some assumptions on how the documentation in
+# the CMake modules is written:
+#
+# * At the beginning of each CMake module there is a comment block that is written in restructured text.
+# The first two characters of each line (the comment character
+# and a blank) are ignored. The resulting content of each line must form valid rst.
+# * TODO document more
+#
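+# Example (a minimal sketch of generating the HTML documentation):
+#
+# .. code-block:: cmake
+#
+#    dune_cmake_sphinx_doc(BUILDTYPE html)
+#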
+include_guard(GLOBAL)
+
+find_package(Sphinx)
+# text for feature summary
+set_package_properties("Sphinx" PROPERTIES
+ DESCRIPTION "Documentation generator"
+ URL "www.sphinx-doc.org"
+ PURPOSE "To generate the documentation from CMake and Python sources")
+
+function(dune_cmake_sphinx_doc)
+ # Only proceed if Sphinx was found on the system
+ if(NOT SPHINX_FOUND)
+ message("-- Skipping building CMake API documentation (Sphinx was not found!)")
+ return()
+ endif()
+
+ # Only proceed if the python interpreter was found by cmake
+ if(NOT Python3_Interpreter_FOUND)
+ message("-- Skipping building CMake API documentation (Python interpreter was not found!)")
+ return()
+ endif()
+
+ # Parse Arguments
+ set(OPTION MODULE_ONLY)
+ set(SINGLE SPHINX_CONF)
+ set(MULTI BUILDTYPE RST_SOURCES)
+ include(CMakeParseArguments)
+ cmake_parse_arguments(SPHINX_CMAKE "${OPTION}" "${SINGLE}" "${MULTI}" ${ARGN})
+ if(SPHINX_CMAKE_UNPARSED_ARGUMENTS)
+ message(WARNING "Unparsed arguments in dune_cmake_sphinx_doc: This often indicates typos!")
+ endif()
+
+ # Apply defaults
+ if(NOT SPHINX_CMAKE_BUILDTYPE)
+ set(SPHINX_CMAKE_BUILDTYPE html)
+ endif()
+
+ # Extract the script directory from dune-common
+ dune_module_path(MODULE dune-common RESULT DUNE_SPHINX_EXT_PATH SCRIPT_DIR)
+
+ # Find the configuration file template.
+ if(NOT SPHINX_CMAKE_SPHINX_CONF)
+ set(SPHINX_CMAKE_SPHINX_CONF ${DUNE_SPHINX_EXT_PATH}/conf.py.in)
+ endif()
+
+ # Apply defaults to the rst sources that are not module dependent.
+ if(NOT SPHINX_CMAKE_RST_SOURCES)
+ file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/contents.rst "")
+ set(SPHINX_CMAKE_RST_SOURCES ${DUNE_SPHINX_EXT_PATH}/index.rst.in ${CMAKE_CURRENT_BINARY_DIR}/contents.rst)
+ endif()
+
+ # Write the conf.py, which sets up Sphinx into the build directory
+ configure_file(${SPHINX_CMAKE_SPHINX_CONF} ${CMAKE_CURRENT_BINARY_DIR}/conf.py)
+
+ # Check whether we need to look through all dependencies
+ set(DOC_CMAKE_MODULES)
+ if(NOT SPHINX_CMAKE_MODULE_ONLY)
+ set(DOC_CMAKE_MODULES ${ALL_DEPENDENCIES})
+ endif()
+
+ # Now treat the module dependent rst sources.
+ set(CMAKE_DOC_DEPENDENCIES "")
+ set(${PROJECT_NAME}_PREFIX ${PROJECT_SOURCE_DIR})
+ foreach(dep ${DOC_CMAKE_MODULES} ${PROJECT_NAME})
+ # Look for a build system documentation exported by the module dep
+ set(RSTFILE "")
+ # check in the correct path for non-installed modules
+ if(EXISTS ${${dep}_PREFIX}/doc/buildsystem/${dep}.rst)
+ set(RSTFILE ${${dep}_PREFIX}/doc/buildsystem/${dep}.rst)
+ endif()
+ # now check for the correct path taking into account installed ones
+ if(EXISTS ${${dep}_PREFIX}/share/doc/${dep}/${dep}.rst)
+ set(RSTFILE ${${dep}_PREFIX}/share/doc/${dep}/${dep}.rst)
+ endif()
+ # Now process the file, if we have found one
+ if(RSTFILE)
+ # add it to index.rst then.
+ set(CMAKE_DOC_DEPENDENCIES "${CMAKE_DOC_DEPENDENCIES} ${dep}\n")
+ # ... and copy the rst file to the current build.
+ configure_file(${RSTFILE} ${CMAKE_CURRENT_BINARY_DIR}/${dep}.rst)
+ endif()
+ endforeach()
+
+ # Write the non-module dependent rst source files from templates
+ foreach(rstin ${SPHINX_CMAKE_RST_SOURCES})
+ get_filename_component(rst ${rstin} NAME_WE)
+ configure_file(${rstin} ${CMAKE_CURRENT_BINARY_DIR}/${rst}.rst)
+ endforeach()
+
+ # Generate the list of modules by looking through the module paths
+ # of all dependencies for files matching *.cmake
+ set(SPHINX_DOC_MODULE_LIST)
+ set(${PROJECT_NAME}_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/modules)
+ foreach(dep ${DOC_CMAKE_MODULES} ${PROJECT_NAME})
+ file(GLOB modules "${${dep}_MODULE_PATH}/*.cmake")
+ set(SPHINX_DOC_MODULE_LIST ${SPHINX_DOC_MODULE_LIST} ${modules})
+ endforeach()
+
+ # Initialize a variable that collects all dependencies of the documentation
+ set(DOC_DEPENDENCIES)
+
+ # Generate the rst files for all cmake modules
+ foreach(module ${SPHINX_DOC_MODULE_LIST})
+ get_filename_component(modname ${module} NAME)
+ add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/modules/${modname}
+ COMMAND ${Python3_EXECUTABLE} ${DUNE_SPHINX_EXT_PATH}/extract_cmake_data.py
+ --module=${module}
+ --builddir=${CMAKE_CURRENT_BINARY_DIR}
+ DEPENDS ${module}
+ COMMENT "Extracting CMake API documentation from ${modname}"
+ )
+ set(DOC_DEPENDENCIES ${DOC_DEPENDENCIES} ${CMAKE_CURRENT_BINARY_DIR}/modules/${modname})
+ endforeach()
+
+ # Call Sphinx once for each requested build type
+ foreach(type ${SPHINX_CMAKE_BUILDTYPE})
+ # Call the sphinx executable
+ add_custom_target(sphinx_${type}
+ COMMAND ${SPHINX_EXECUTABLE}
+ -b ${type}
+ -w ${PROJECT_BINARY_DIR}/SphinxError.log
+ -c ${CMAKE_CURRENT_BINARY_DIR}
+ ${CMAKE_CURRENT_BINARY_DIR}
+ ${CMAKE_CURRENT_BINARY_DIR}/${type}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ DEPENDS ${DOC_DEPENDENCIES}
+ )
+ add_dependencies(doc sphinx_${type})
+ endforeach()
+endfunction()
--- /dev/null
+include_guard(GLOBAL)
+find_package(Sphinx)
+find_package(Python3 COMPONENTS Interpreter Development)
+
+function(dune_sphinx_doc)
+ # Only proceed if Sphinx was found on the system
+ if(NOT SPHINX_FOUND)
+ message("-- Skipping building Sphinx documentation (Sphinx was not found!)")
+ return()
+ endif()
+
+ # Only proceed if the python interpreter was found by cmake
+ if(NOT Python3_Interpreter_FOUND)
+ message("-- Skipping building Sphinx documentation (Python interpreter was not found!)")
+ return()
+ endif()
+
+ # Parse Arguments
+ include(CMakeParseArguments)
+ cmake_parse_arguments(SPHINX_DOC "" "CONF" "BUILDTYPE" ${ARGN})
+ if(SPHINX_DOC_UNPARSED_ARGUMENTS)
+ message(WARNING "Unparsed arguments in dune_sphinx_doc")
+ endif()
+
+ # copy conf.py into build directory
+ if(NOT SPHINX_DOC_CONF)
+ set(SPHINX_DOC_CONF conf.py)
+ endif()
+ if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${SPHINX_DOC_CONF}.in)
+ configure_file(${SPHINX_DOC_CONF}.in ${CMAKE_CURRENT_BINARY_DIR}/conf.py)
+ elseif(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${SPHINX_DOC_CONF})
+ configure_file(${SPHINX_DOC_CONF} ${CMAKE_CURRENT_BINARY_DIR}/conf.py COPYONLY)
+ else()
+ message(SEND_ERROR "Sphinx configuration '${SPHINX_DOC_CONF}' not found.")
+ endif()
+
+ # call Sphinx for each requested build type
+ if(NOT SPHINX_DOC_BUILDTYPE)
+ set(SPHINX_DOC_BUILDTYPE html)
+ endif()
+ foreach(type ${SPHINX_DOC_BUILDTYPE})
+ add_custom_target(sphinx_doc_${type}
+ COMMAND ${SPHINX_EXECUTABLE}
+ -b ${type}
+ -w ${PROJECT_BINARY_DIR}/Sphinx-${type}.log
+ -c ${CMAKE_CURRENT_BINARY_DIR}
+ ${CMAKE_CURRENT_BINARY_DIR}
+ ${CMAKE_CURRENT_BINARY_DIR}/${type}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/conf.py
+ )
+ add_dependencies(sphinx_doc_${type} sphinx_files)
+ add_dependencies(doc sphinx_doc_${type})
+ endforeach()
+endfunction()
+
+function(add_sphinx_target base file)
+ find_program(JUPYTER jupyter)
+ get_filename_component(extension ${file} EXT)
+ set(SPHINXDIR ${PROJECT_BINARY_DIR}/doc/sphinx)
+ set(OUT ${SPHINXDIR}/${file})
+ set(IN ${CMAKE_CURRENT_SOURCE_DIR}/${file})
+ string(REGEX REPLACE "\\.[^.]*$" "" filebase ${file})
+ set(TARGET ${base}.${file})
+ add_custom_target(${TARGET} DEPENDS ${OUT})
+ add_dependencies(sphinx_files ${TARGET})
+ add_custom_command(
+ OUTPUT ${OUT}
+ DEPENDS ${IN}
+ COMMAND ${CMAKE_COMMAND} -E copy ${IN} ${OUT}
+ VERBATIM
+ )
+ if ("${extension}" STREQUAL ".ipynb")
+ if (JUPYTER)
+ set(TARGET ${base}.${filebase}.rst)
+ set(OUTRST ${SPHINXDIR}/${filebase}.rst)
+ add_custom_target(${TARGET} DEPENDS ${OUTRST})
+ add_dependencies(sphinx_files ${TARGET})
+ add_custom_command(
+ OUTPUT ${OUTRST}
+ DEPENDS ${OUT}
+ COMMAND jupyter nbconvert --ExecutePreprocessor.timeout=-1 --execute --allow-errors --to="rst" ${OUT} --output ${filebase}.rst
+ COMMAND sed -i "s/raw:: latex/math::/g" ${OUTRST}
+ WORKING_DIRECTORY ${SPHINXDIR}
+ VERBATIM
+ )
+ endif()
+ endif()
+endfunction()
+
+function(add_sphinx_files base)
+ foreach(file ${ARGN})
+ add_sphinx_target(${base} ${file})
+ endforeach()
+endfunction()
+function(add_sphinx_targets base)
+ add_custom_target(sphinx_files)
+ add_sphinx_files(${base} ${ARGN})
+ dune_sphinx_doc()
+endfunction()
--- /dev/null
+# This module configures the DUNE debug streams.
+#
+# .. cmake_variable:: MINIMAL_DEBUG_LEVEL
+#
+# This variable configures the Dune debug streams.
+# Standard debug streams with level below :code:`MINIMAL_DEBUG_LEVEL` will
+# collapse to doing nothing if output is requested. Possible values are
+# :code:`vverb`, :code:`verb`, :code:`info`, :code:`warn` and :code:`grave`.
+# Defaults to :code:`warn`.
+#
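+# Example (a sketch; the level is usually chosen at configure time, e.g. by passing
+# -DMINIMAL_DEBUG_LEVEL=info on the cmake command line or by pre-populating the cache):
+#
+# .. code-block:: cmake
+#
+#    set(MINIMAL_DEBUG_LEVEL "info" CACHE STRING "minimal Dune debug level")
+#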
+include_guard(GLOBAL)
+
+macro(dune_set_minimal_debug_level)
+set(MINIMAL_DEBUG_LEVEL "warn" CACHE STRING "set the MINIMAL_DEBUG_LEVEL. Standard debug streams with level below MINIMAL_DEBUG_LEVEL will collapse to doing nothing if output is requested. (default=warn)")
+set_property(CACHE MINIMAL_DEBUG_LEVEL PROPERTY STRINGS
+ "grave" "warn" "info" "verb" "vverb")
+if(MINIMAL_DEBUG_LEVEL STREQUAL "grave")
+ set(DUNE_MINIMAL_DEBUG_LEVEL 5)
+elseif(MINIMAL_DEBUG_LEVEL STREQUAL "info")
+ set(DUNE_MINIMAL_DEBUG_LEVEL 3)
+elseif(MINIMAL_DEBUG_LEVEL STREQUAL "verb")
+ set(DUNE_MINIMAL_DEBUG_LEVEL 2)
+elseif(MINIMAL_DEBUG_LEVEL STREQUAL "vverb")
+ set(DUNE_MINIMAL_DEBUG_LEVEL 1)
+# default to warn
+else()
+ set(DUNE_MINIMAL_DEBUG_LEVEL 4)
+endif()
+message(STATUS "Set Minimal Debug Level to ${DUNE_MINIMAL_DEBUG_LEVEL}")
+endmacro(dune_set_minimal_debug_level)
--- /dev/null
+# This module provides convenience macros to make files from the source tree available in the build tree.
+#
+# It provides the following macros:
+#
+# dune_add_copy_command(filename)
+#
+# This macro adds a file-copy command.
+# The file_name is the name of a file that exists
+# in the source tree. This file will be copied
+# to the build tree when executing this command.
+# Notice that this does not create a top-level
+# target. In order to do this you have to additionally
+# call add_custom_target(...) with a dependency
+# on the file.
+#
+# dune_add_copy_target(target_name file_name)
+#
+# This macro adds a file-copy target under the given target_name.
+# The file_name is the name of a file that exists
+# in the source tree. This file will be copied
+# to the build tree.
+#
+# dune_add_copy_dependency(target file_name)
+#
+# This macro adds a copy-dependency to a target
+# The file_name is the name of a file that exists
+# in the source tree. This file will be copied
+# to the build tree.
+#
+#
+# .. cmake_function:: dune_add_copy_command
+#
+# .. cmake_param:: filename
+# :positional:
+# :single:
+# :required:
+#
+# TODO DOC ME!
+#
+# .. cmake_function:: dune_add_copy_target
+#
+# .. cmake_param:: target_name
+# :positional:
+# :single:
+# :required:
+#
+# .. cmake_param:: filename
+# :positional:
+# :single:
+# :required:
+#
+# TODO DOC ME!
+#
+# .. cmake_function:: dune_add_copy_dependency
+#
+# .. cmake_param:: target
+# :positional:
+# :single:
+# :required:
+#
+# .. cmake_param:: filename
+# :positional:
+# :single:
+# :required:
+#
+# TODO DOC ME!
+#
+# .. cmake_function:: dune_symlink_to_source_tree
+#
+# .. cmake_param:: NAME
+# :single:
+#
+# The name of the symlink, defaults to :code:`src_dir`.
+#
+# This function will place a symlink into every subdirectory
+# of the build tree that allows you to jump to the corresponding
+# source directory. Call this from your top-level :code:`CMakeLists.txt`
+# to enable it for a given module. To enable it for all modules,
+# set the variable :ref:`DUNE_SYMLINK_TO_SOURCE_TREE` instead.
+# If used on Windows systems, a warning is issued.
+#
+# .. cmake_variable:: DUNE_SYMLINK_TO_SOURCE_TREE
+#
+# If this variable is set to TRUE, the functionality of
+# :ref:`dune_symlink_to_source_tree` is enabled in all modules.
+# This will place symlinks to the corresponding source directory
+# in every subdirectory of the build directory.
+#
+# .. cmake_variable:: DUNE_SYMLINK_RELATIVE_LINKS
+#
+# If this variable is set to TRUE, the buildsystem will create relative
+# links instead of absolute ones.
+#
+# .. cmake_function:: dune_symlink_to_source_files
+#
+# .. cmake_param:: FILES
+# :multi:
+# :required:
+#
+# The list of files to symlink
+#
+# .. cmake_param:: DESTINATION
+# :multi:
+# :required:
+#
+# Relative path of the target directory
+#
+# Create symlinks in the build tree that
+# point to particular files in the source directory. This is usually
+# used for grid and ini files and the like. On Windows systems,
+# a warning is issued and copying is used as a fallback to
+# symlinking.
+#
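+# Example (a minimal sketch; the file names are hypothetical):
+#
+# .. code-block:: cmake
+#
+#    # make a grid file and an ini file from the source tree available next to the test executable
+#    dune_symlink_to_source_files(FILES unitcube.msh params.ini)
+#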
+include_guard(GLOBAL)
+
+macro(dune_add_copy_command file_name)
+ add_custom_command(
+ OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${file_name}"
+ COMMAND ${CMAKE_COMMAND}
+ ARGS -E copy "${CMAKE_CURRENT_SOURCE_DIR}/${file_name}" "${file_name}"
+ DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${file_name}"
+ )
+endmacro(dune_add_copy_command file_name)
+
+macro(dune_add_copy_target target_name file_name)
+ dune_add_copy_command(${file_name})
+ add_custom_target("${target_name}" ALL DEPENDS "${file_name}")
+endmacro(dune_add_copy_target target_name file_name)
+
+macro(dune_add_copy_dependency target file_name)
+ message(STATUS "Adding copy-to-build-dir dependency for ${file_name} to target ${target}")
+ dune_add_copy_target("${target}_copy_${file_name}" "${file_name}")
+ add_dependencies(${target} "${target}_copy_${file_name}")
+endmacro(dune_add_copy_dependency)
+
+function(dune_symlink_to_source_tree)
+ # if source and binary dir are equal then the symlink will create serious problems
+ if(PROJECT_SOURCE_DIR STREQUAL PROJECT_BINARY_DIR)
+ return()
+ endif()
+
+ # parse arguments
+ include(CMakeParseArguments)
+ cmake_parse_arguments(ARG "" "NAME" "" ${ARGN})
+ if(NOT ARG_NAME)
+ set(ARG_NAME "src_dir")
+ endif()
+
+ # check for Windows to issue a warning
+ if(${CMAKE_SYSTEM_NAME} STREQUAL "Windows")
+ if(NOT DEFINED DUNE_WINDOWS_SYMLINK_WARNING)
+ message(WARNING "Your module wanted to create symlinks, but you cannot do that on your platform.")
+ set(DUNE_WINDOWS_SYMLINK_WARNING TRUE)
+ endif()
+ else()
+ # get a list of all CMakeLists.txt files in the source directory and below.
+ file(GLOB_RECURSE files RELATIVE ${PROJECT_SOURCE_DIR} "*CMakeLists.txt")
+
+ # iterate over all files, extract the directory name and write a symlink in the corresponding build directory
+ foreach(f ${files})
+ get_filename_component(dir ${f} DIRECTORY)
+ set(_target "${PROJECT_SOURCE_DIR}/${dir}")
+ if(DUNE_SYMLINK_RELATIVE_LINKS)
+ file(RELATIVE_PATH _target "${PROJECT_BINARY_DIR}/${dir}" "${_target}")
+ endif()
+ execute_process(COMMAND ${CMAKE_COMMAND} "-E" "create_symlink" "${_target}" "${PROJECT_BINARY_DIR}/${dir}/${ARG_NAME}")
+ endforeach()
+ endif()
+endfunction(dune_symlink_to_source_tree)
+
+function(dune_symlink_to_source_files)
+
+ # if source and binary dir are equal then the symlink will create serious problems
+ if(PROJECT_SOURCE_DIR STREQUAL PROJECT_BINARY_DIR)
+ return()
+ endif()
+
+ # parse arguments
+ include(CMakeParseArguments)
+ cmake_parse_arguments(ARG "" "DESTINATION" "FILES" ${ARGN})
+ if(ARG_UNPARSED_ARGUMENTS)
+ message(WARNING "You are using dune_symlink_to_source_files without named arguments (or have typos in your named arguments)!")
+ endif()
+
+ # create symlinks for all given files
+ foreach(f ${ARG_FILES})
+ # check whether there is an explicitly given destination
+ if(ARG_DESTINATION)
+ set(destination "${ARG_DESTINATION}/")
+ else()
+ set(destination "")
+ endif()
+ # check for Windows to issue a warning
+ if(${CMAKE_SYSTEM_NAME} STREQUAL "Windows")
+ if(NOT DEFINED DUNE_WINDOWS_SYMLINK_WARNING)
+ message(WARNING "Your module wanted to create symlinks, but you cannot do that on your platform.")
+ set(DUNE_WINDOWS_SYMLINK_WARNING TRUE)
+ endif()
+ # create a copy
+ execute_process(COMMAND ${CMAKE_COMMAND} "-E" "copy" "${CMAKE_CURRENT_SOURCE_DIR}/${f}" "${CMAKE_CURRENT_BINARY_DIR}/${destination}${f}")
+ else()
+ # create symlink
+ set(_target "${CMAKE_CURRENT_SOURCE_DIR}/${f}")
+ if(DUNE_SYMLINK_RELATIVE_LINKS)
+ file(RELATIVE_PATH _target "${CMAKE_CURRENT_BINARY_DIR}/${destination}" "${_target}")
+ endif()
+ execute_process(COMMAND ${CMAKE_COMMAND} "-E" "create_symlink" "${_target}" "${CMAKE_CURRENT_BINARY_DIR}/${destination}${f}")
+ endif()
+ endforeach()
+endfunction(dune_symlink_to_source_files)
--- /dev/null
+# Module that provides tools for testing the Dune way.
+#
+# Note that "the Dune way" of doing this has changed after
+# the 2.4 release. See the build system documentation for details.
+#
+# .. cmake_function:: dune_declare_test_label
+#
+# .. cmake_brief::
+#
+# Declare labels for :ref:`dune_add_test`.
+#
+# .. cmake_param:: LABELS
+# :multi:
+#
+# The names of labels to declare. Label names must be nonempty and
+# consist only of alphanumeric characters plus :code:`-` and :code:`_`
+# to make sure it is easy to construct regular expressions from them for
+# :code:`ctest -L ${label_regex}`.
+#
+# Labels need to be declared to ensure that the target
+# :code:`build_${label}_tests` exists. They will normally be declared
+# on-demand by :ref:`dune_add_test`. But sometimes it is useful to be able to
+# run :code:`make build_${label}_tests` whether or not any tests with that
+# label exist in a module. For these cases :ref:`dune_declare_test_label` can
+# be called explicitly.
+#
+# The label :code:`quick` is always predeclared.
+#
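+# Example (a sketch; the label name is hypothetical):
+#
+# .. code-block:: cmake
+#
+#    dune_declare_test_label(LABELS expensive)
+#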
+# .. cmake_function:: dune_add_test
+#
+# .. cmake_brief::
+#
+# Adds a test to the Dune testing suite!
+#
+# .. cmake_param:: NAME
+# :single:
+#
+# The name of the test that should be added. If an executable
+# is also added (by specifying SOURCES), the executable is also
+# named accordingly. If omitted, the name will be deduced from
+# the (single) sources parameter or from the given target. Note
+# that this requires you to take care that you use any given target
+# or source file for only one such test.
+#
+# .. cmake_param:: SOURCES
+# :multi:
+#
+# The source files that this test depends on. These are the
+# sources that will be passed to :ref:`add_executable`.
+#
+# You *must* specify either :code:`SOURCES` or :code:`TARGET`.
+#
+# .. cmake_param:: TARGET
+# :single:
+#
+# An executable target which should be used for the test. Use
+# this option over the :code:`SOURCES` parameter if you want to
+# reuse already added targets.
+#
+# You *must* specify either :code:`SOURCES` or :code:`TARGET`.
+#
+# .. cmake_param:: COMPILE_DEFINITIONS
+# :multi:
+# :argname: def
+#
+# A set of compile definitions to add to the target.
+# Only definitions beyond the application of :ref:`add_dune_all_flags`
+# have to be stated.
+# This is only used, if :code:`dune_add_test` adds the executable itself.
+#
+# .. cmake_param:: COMPILE_FLAGS
+# :multi:
+# :argname: flag
+#
+# A set of non-definition compile flags to add to the target.
+# Only flags beyond the application of :ref:`add_dune_all_flags`
+# have to be stated.
+# This is only used, if :code:`dune_add_test` adds the executable itself.
+#
+# .. cmake_param:: LINK_LIBRARIES
+# :multi:
+# :argname: lib
+#
+# A list of libraries to link the target to.
+# Only libraries beyond the application of :ref:`add_dune_all_flags`
+# have to be stated.
+# This is only used, if :code:`dune_add_test` adds the executable itself.
+#
+# .. cmake_param:: EXPECT_COMPILE_FAIL
+# :option:
+#
+# If given, the test is expected to not compile successfully!
+#
+# .. cmake_param:: EXPECT_FAIL
+# :option:
+#
+# If given, this test is expected to compile, but fail to run.
+#
+# .. cmake_param:: CMD_ARGS
+# :multi:
+# :argname: arg
+#
+# Command line arguments that should be passed to this test.
+#
+# .. cmake_param:: MPI_RANKS
+# :multi:
+# :argname: ranks
+#
+# The numbers of cores that this test should be executed with.
+# Note that one test (in the ctest sense) is created for each number
+# given here. Any number exceeding the user-specified processor maximum
+# :ref:`DUNE_MAX_TEST_CORES` will be ignored. Tests with a
+# processor number :code:`n` higher than one will have the suffix
+# :code:`-mpi-n` appended to their name. You need to specify the
+# TIMEOUT option when specifying the MPI_RANKS option.
+#
+# .. cmake_param:: CMAKE_GUARD
+# :multi:
+# :argname: condition
+#
+# A number of conditions that CMake should evaluate before adding this
+# test. If one of the conditions fails, the test should be shown
+# as skipped in the test summary. Use this feature instead of guarding
+# the call to :code:`dune_add_test` with an :code:`if` clause.
+#
+# The passed condition can be a complex expression like
+# `( A OR B ) AND ( C OR D )`. Mind the spaces around the parentheses.
+#
+# Example: Write CMAKE_GUARD dune-foo_FOUND if you want your test to only
+# build and run when the dune-foo module is present.
+#
+# .. cmake_param:: COMMAND
+# :multi:
+# :argname: cmd
+#
+# You may specify the COMMAND option to give the exact command line to be
+# executed when running the test. This defaults to the name of the executable
+# added by dune_add_test for this test or the name of the executable of the given TARGET.
+# Note that if you specify both CMD_ARGS
+# and COMMAND, the given CMD_ARGS will be appended to your COMMAND. If you use
+# this in combination with the MPI_RANKS parameter, the call to mpiexec will still be
+# wrapped around the given commands.
+#
+# .. cmake_param:: COMPILE_ONLY
+# :option:
+#
+# Set if the given test should only be compiled during :code:`make build_tests`,
+# but not run during :code:`make test`. This is useful if you compile the same
+# executable twice, but with different compile flags, where you want to assure that
+# it compiles with both sets of flags, but you already know they will produce the
+# same result.
+#
+# .. cmake_param:: TIMEOUT
+# :single:
+#
+# If set, the test will time out after the given number of seconds. This supersedes
+# any timeout setting in ctest (see `cmake --help-property TIMEOUT`). If you
+# specify the MPI_RANKS option, you need to specify a TIMEOUT.
+#
+# .. cmake_param:: LABELS
+# :multi:
+#
+# A list of labels to add to the test. This has two effects: it sets
+# the LABELS property on the test so :code:`ctest -L ${label_regex}` can
+# be used to run all tests with certain labels. It also adds any
+# targets created as dependencies to a custom target, so you can build
+# all tests with a particular label by doing :code:`make
+# build_${label}_tests` without having to build all the other tests as
+# well.
+#
+# The :code:`build_${label}_tests` targets are created on-demand the
+# first time a test with that label is added. In some situations it can
+# depend on the values of cmake cache variables whether a test is added,
+# and then it can happen that the :code:`build_${label}_tests` target
+# exists only sometimes. If your workflow relies on the existence of
+# these targets, even if building them just returns successfully without
+# doing anything, you can ensure they exist by calling
+# :ref:`dune_declare_test_label` unconditionally. The label
+# :code:`quick` is always predeclared in this way.
+#
+# The label names must be non-empty, and must only contain alphanumeric
+# characters plus :code:`-` and :code:`_`. This restriction is in
+# place to make it easy to construct regular expressions from the label
+# names for :code:`ctest -L ${label_regex}`.
+#
+# This function defines the Dune way of adding a test to the testing suite.
+# You may either add the executable yourself through :ref:`add_executable`
+# and pass it to the :code:`TARGET` option, or you may rely on :ref:`dune_add_test`
+# to do so.
+#
+# .. cmake_variable:: DUNE_REENABLE_ADD_TEST
+#
+# You may set this variable to True either through your opts file or in your module
+# (before the call to :code:`include(DuneMacros)`) to suppress the error that is thrown if
+# :code:`add_test` is used. You should only do that if you have proper reason to do so.
+#
+# .. cmake_variable:: DUNE_MAX_TEST_CORES
+#
+# You may set this variable to give an upper bound on the number of processors that
+# a single test may use. Defaults to 2 when MPI is found and to 1 otherwise.
+#
+# .. cmake_variable:: DUNE_BUILD_TESTS_ON_MAKE_ALL
+#
+# You may set this variable through your opts file or on a per module level (in the toplevel
+# :code:`CMakeLists.txt` before :code:`include(DuneMacros)`) to have the Dune build system
+# build all tests during `make all`. Note that this may take quite some time for some modules.
+# If this variable is not set, you have to build tests through the target :code:`build_tests`.
+#
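+# Example (a minimal sketch; the source file, link library and label are assumptions):
+#
+# .. code-block:: cmake
+#
+#    dune_add_test(NAME parallelvectortest
+#                  SOURCES parallelvectortest.cc
+#                  LINK_LIBRARIES dunecommon
+#                  MPI_RANKS 1 2
+#                  TIMEOUT 300
+#                  LABELS quick
+#                  CMAKE_GUARD MPI_FOUND)
+#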
+include_guard(GLOBAL)
+
+# enable the testing suite on the CMake side.
+enable_testing()
+include(CTest)
+
+# Introduce a target that triggers the building of all tests
+add_custom_target(build_tests)
+
+function(dune_declare_test_label)
+ include(CMakeParseArguments)
+ set(OPTIONS)
+ set(SINGLEARGS)
+ set(MULTIARGS LABELS)
+ cmake_parse_arguments(arg "${OPTIONS}" "${SINGLEARGS}" "${MULTIARGS}" ${ARGN})
+
+ if( (DEFINED arg_UNPARSED_ARGUMENTS) AND NOT ( arg_UNPARSED_ARGUMENTS STREQUAL "" ) )
+ message(FATAL_ERROR "Unhandled extra arguments given to dune_declare_test_label(): "
+ "<${arg_UNPARSED_ARGUMENTS}>")
+ endif()
+
+ foreach(label IN LISTS arg_LABELS)
+ # Make sure the label is not empty, and does not contain any funny
+ # characters, in particular regex characters
+ if(NOT (label MATCHES "^[-_0-9a-zA-Z]+$"))
+ message(FATAL_ERROR "Refusing to add label \"${label}\" since it is "
+ "empty or contains funny characters (characters other than "
+ "alphanumeric ones and \"-\" or \"_\"; the intent of this restriction "
+ "is to make construction of the argument to \"ctest -L\" easier")
+ endif()
+ set(target "build_${label}_tests")
+ if(NOT TARGET "${target}")
+ add_custom_target("${target}")
+ endif()
+ endforeach()
+endfunction(dune_declare_test_label)
+
+# predefine "quick" test label so build_quick_tests can be built
+# unconditionally
+dune_declare_test_label(LABELS quick)
+
+# Set the default on the variable DUNE_MAX_TEST_CORES
+if(NOT DUNE_MAX_TEST_CORES)
+ set(DUNE_MAX_TEST_CORES 2)
+endif()
+
+function(dune_add_test)
+ include(CMakeParseArguments)
+ set(OPTIONS EXPECT_COMPILE_FAIL EXPECT_FAIL SKIP_ON_77 COMPILE_ONLY)
+ set(SINGLEARGS NAME TARGET TIMEOUT)
+ set(MULTIARGS SOURCES COMPILE_DEFINITIONS COMPILE_FLAGS LINK_LIBRARIES CMD_ARGS MPI_RANKS COMMAND CMAKE_GUARD LABELS)
+ cmake_parse_arguments(ADDTEST "${OPTIONS}" "${SINGLEARGS}" "${MULTIARGS}" ${ARGN})
+
+ # Check whether the parser produced any errors
+ if(ADDTEST_UNPARSED_ARGUMENTS)
+ message(WARNING "Unrecognized arguments ('${ADDTEST_UNPARSED_ARGUMENTS}') for dune_add_test!")
+ endif()
+
+ # Check input for validity and apply defaults
+ if(NOT ADDTEST_SOURCES AND NOT ADDTEST_TARGET)
+ message(FATAL_ERROR "You need to specify either the SOURCES or the TARGET option for dune_add_test!")
+ endif()
+ if(ADDTEST_SOURCES AND ADDTEST_TARGET)
+ message(FATAL_ERROR "You cannot specify both SOURCES and TARGET for dune_add_test")
+ endif()
+ if(NOT ADDTEST_NAME)
+ # try deducing the test name from the executable name
+ if(ADDTEST_TARGET)
+ set(ADDTEST_NAME ${ADDTEST_TARGET})
+ endif()
+ # try deducing the test name form the source name
+ if(ADDTEST_SOURCES)
+ # deducing a name is only possible with a single source argument
+ list(LENGTH ADDTEST_SOURCES len)
+ if(NOT len STREQUAL "1")
+ message(FATAL_ERROR "Cannot deduce test name from multiple sources!")
+ endif()
+ # strip file extension
+ get_filename_component(ADDTEST_NAME ${ADDTEST_SOURCES} NAME_WE)
+ endif()
+ endif()
+ if(NOT ADDTEST_COMMAND)
+ if(ADDTEST_TARGET)
+ set(ADDTEST_COMMAND ${ADDTEST_TARGET})
+ else()
+ set(ADDTEST_COMMAND ${ADDTEST_NAME})
+ endif()
+ endif()
+ if(ADDTEST_MPI_RANKS AND (NOT ADDTEST_TIMEOUT))
+ message(FATAL_ERROR "dune_add_test: You need to specify the TIMEOUT parameter if using the MPI_RANKS parameter.")
+ endif()
+ if(NOT ADDTEST_MPI_RANKS)
+ set(ADDTEST_MPI_RANKS 1)
+ endif()
+ if(NOT ADDTEST_TIMEOUT)
+ set(ADDTEST_TIMEOUT 300)
+ endif()
+ foreach(num ${ADDTEST_MPI_RANKS})
+ if(NOT "${num}" MATCHES "[1-9][0-9]*")
+ message(FATAL_ERROR "${num} was given to the MPI_RANKS arugment of dune_add_test, but it does not seem like a correct processor number")
+ endif()
+ endforeach()
+ if(ADDTEST_SKIP_ON_77)
+ message(WARNING "The SKIP_ON_77 option for dune_add_test is obsolete, it is now enabled by default.")
+ endif()
+
+ # Discard all parallel tests if MPI was not found
+ if(NOT MPI_FOUND)
+ set(DUNE_MAX_TEST_CORES 1)
+ endif()
+
+ # Find out whether this test should be a dummy
+ set(SHOULD_SKIP_TEST FALSE)
+ set(FAILED_CONDITION_PRINTING "")
+ foreach(condition ${ADDTEST_CMAKE_GUARD})
+ separate_arguments(condition)
+ if(NOT (${condition}))
+ set(SHOULD_SKIP_TEST TRUE)
+ set(FAILED_CONDITION_PRINTING "${FAILED_CONDITION_PRINTING}std::cout << \" ${condition}\" << std::endl;\n")
+ endif()
+ endforeach()
+
+ # If we do nothing, switch the sources for a dummy source
+ if(SHOULD_SKIP_TEST)
+ dune_module_path(MODULE dune-common RESULT scriptdir SCRIPT_DIR)
+ set(ADDTEST_TARGET)
+ set(dummymain ${CMAKE_CURRENT_BINARY_DIR}/main77_${ADDTEST_NAME}.cc)
+ configure_file(${scriptdir}/main77.cc.in ${dummymain})
+ set(ADDTEST_SOURCES ${dummymain})
+ endif()
+
+ # Add the executable if it is not already present
+ if(ADDTEST_SOURCES)
+ add_executable(${ADDTEST_NAME} ${ADDTEST_SOURCES})
+ # add all flags to the target!
+ add_dune_all_flags(${ADDTEST_NAME})
+ # Apply the test-specific compile definitions, flags and link libraries
+ target_compile_definitions(${ADDTEST_NAME} PUBLIC ${ADDTEST_COMPILE_DEFINITIONS})
+ target_compile_options(${ADDTEST_NAME} PUBLIC ${ADDTEST_COMPILE_FLAGS})
+ target_link_libraries(${ADDTEST_NAME} PUBLIC ${ADDTEST_LINK_LIBRARIES})
+ set(ADDTEST_TARGET ${ADDTEST_NAME})
+ endif()
+
+ # Make sure to exclude the target from all, even when it is user-provided
+ if(DUNE_BUILD_TESTS_ON_MAKE_ALL AND (NOT ADDTEST_EXPECT_COMPILE_FAIL))
+ set_property(TARGET ${ADDTEST_TARGET} PROPERTY EXCLUDE_FROM_ALL 0)
+ else()
+ set_property(TARGET ${ADDTEST_TARGET} PROPERTY EXCLUDE_FROM_ALL 1)
+ endif()
+
+ # make sure each label exists and its name is acceptable
+ dune_declare_test_label(LABELS ${ADDTEST_LABELS})
+
+ # Have build_tests and build_${label}_tests depend on the given target in
+ # order to trigger the build correctly
+ if(NOT ADDTEST_EXPECT_COMPILE_FAIL)
+ add_dependencies(build_tests ${ADDTEST_TARGET})
+ foreach(label IN LISTS ADDTEST_LABELS)
+ add_dependencies(build_${label}_tests ${ADDTEST_TARGET})
+ endforeach()
+ endif()
+
+ # Process the EXPECT_COMPILE_FAIL option
+ if(ADDTEST_EXPECT_COMPILE_FAIL)
+ set(ADDTEST_COMMAND "${CMAKE_COMMAND}")
+ set(ADDTEST_CMD_ARGS --build . --target ${ADDTEST_TARGET} --config "$<CONFIGURATION>")
+ endif()
+
+ # Add one test for each specified processor number
+ foreach(procnum ${ADDTEST_MPI_RANKS})
+ if((NOT "${procnum}" GREATER "${DUNE_MAX_TEST_CORES}") AND (NOT ADDTEST_COMPILE_ONLY))
+ set(ACTUAL_NAME ${ADDTEST_NAME})
+ set(ACTUAL_CMD_ARGS ${ADDTEST_CMD_ARGS})
+ if(TARGET "${ADDTEST_COMMAND}")
+ # if the target name is specified as command, expand to full path using the TARGET_FILE generator expression
+ set(ACTUAL_TESTCOMMAND "$<TARGET_FILE:${ADDTEST_COMMAND}>")
+ else()
+ set(ACTUAL_TESTCOMMAND "${ADDTEST_COMMAND}")
+ endif()
+
+ # modify test name and command for parallel tests
+ if(NOT ${procnum} STREQUAL "1")
+ set(ACTUAL_NAME "${ACTUAL_NAME}-mpi-${procnum}")
+ set(ACTUAL_CMD_ARGS ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} ${procnum} "${ACTUAL_TESTCOMMAND}" ${MPIEXEC_POSTFLAGS} ${ACTUAL_CMD_ARGS})
+ set(ACTUAL_TESTCOMMAND "${MPIEXEC}")
+ endif()
+
+ # if this is a skipped test because a guard was false, overwrite the command
+ if(SHOULD_SKIP_TEST)
+ set(ACTUAL_TESTCOMMAND "$<TARGET_FILE:${ADDTEST_TARGET}>")
+ set(ACTUAL_CMD_ARGS)
+ endif()
+
+ # Now add the actual test
+ _add_test(NAME ${ACTUAL_NAME}
+ COMMAND "${ACTUAL_TESTCOMMAND}" ${ACTUAL_CMD_ARGS}
+ )
+
+ # Make the test depend on the existence of the target to trigger "Not Run" response
+ if(NOT ADDTEST_EXPECT_COMPILE_FAIL)
+ set_tests_properties(${ACTUAL_NAME} PROPERTIES REQUIRED_FILES $<TARGET_FILE:${ADDTEST_TARGET}>)
+ endif()
+ # Define the number of processors (ctest will coordinate this with the -j option)
+ set_tests_properties(${ACTUAL_NAME} PROPERTIES PROCESSORS ${procnum})
+ # Apply the timeout (which was defaulted to 5 minutes if not specified)
+ set_tests_properties(${ACTUAL_NAME} PROPERTIES TIMEOUT ${ADDTEST_TIMEOUT})
+ # Process the EXPECT_FAIL option
+ if(ADDTEST_EXPECT_COMPILE_FAIL OR ADDTEST_EXPECT_FAIL)
+ set_tests_properties(${ACTUAL_NAME} PROPERTIES WILL_FAIL true)
+ endif()
+ # When using ninja, we must call the build command from ${PROJECT_BINARY_DIR}
+ if(ADDTEST_EXPECT_COMPILE_FAIL)
+ set_tests_properties(${ACTUAL_NAME} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}")
+ endif()
+ # Skip the test if the return code is 77!
+ set_tests_properties(${ACTUAL_NAME} PROPERTIES SKIP_RETURN_CODE 77)
+ # Set the labels on the test
+ set_tests_properties(${ACTUAL_NAME} PROPERTIES LABELS "${ADDTEST_LABELS}")
+ endif()
+ endforeach()
+endfunction()
+
+macro(add_directory_test_target)
+ message(FATAL_ERROR "The function add_directory_test_target has been removed alongside all testing magic in dune-common. Check dune_add_test for the new way!")
+endmacro()
+
+macro(add_test)
+ if(NOT DUNE_REENABLE_ADD_TEST)
+ message(SEND_ERROR "Please use dune_add_test instead of add_test! If you need add_test in a downstream project, set the variable DUNE_REENABLE_ADD_TEST to True in that project to suppress this error.")
+ else()
+ _add_test(${ARGN})
+ endif()
+endmacro()
--- /dev/null
+#[=======================================================================[.rst:
+FindGMP
+-------
+
+Find the GNU Multiple Precision Arithmetic Library (GMP)
+and the corresponding C++ bindings GMPxx.
+
+This module searches for both libraries and only considers the package
+found if both can be located. It then defines separate targets for the C
+and the C++ library.
+
+Imported Targets
+^^^^^^^^^^^^^^^^
+
+This module provides the following imported targets, if found:
+
+``GMP::gmp``
+ Library target of the C library.
+``GMP::gmpxx``
+ Library target of the C++ library, which also links to the C library.
+
+Result Variables
+^^^^^^^^^^^^^^^^
+
+This will define the following variables:
+
+``GMP_FOUND``
+ True if the GMP library, the GMPxx headers and
+ the GMPxx library were found.
+
+Cache Variables
+^^^^^^^^^^^^^^^
+
+You may set the following variables to modify the behaviour of
+this module:
+
+``GMP_INCLUDE_DIR``
+ The directory containing ``gmp.h``.
+``GMP_LIB``
+ The path to the gmp library.
+``GMPXX_INCLUDE_DIR``
+ The directory containing ``gmpxx.h``.
+``GMPXX_LIB``
+ The path to the gmpxx library.
+
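+Example
+^^^^^^^
+
+A minimal usage sketch (the consuming target ``myapp`` is hypothetical):
+
+.. code-block:: cmake
+
+  find_package(GMP)
+  if(GMP_FOUND)
+    target_link_libraries(myapp PRIVATE GMP::gmpxx)
+  endif()
+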
+#]=======================================================================]
+
+# Add a feature summary for this package
+include(FeatureSummary)
+set_package_properties(GMP PROPERTIES
+ DESCRIPTION "GNU multi-precision library"
+ URL "https://gmplib.org"
+)
+
+# Try finding the package with pkg-config
+find_package(PkgConfig QUIET)
+pkg_check_modules(PKG QUIET gmp gmpxx)
+
+# Try to locate the libraries and their headers, using pkg-config hints
+find_path(GMP_INCLUDE_DIR gmp.h HINTS ${PKG_gmp_INCLUDEDIR})
+find_library(GMP_LIB gmp HINTS ${PKG_gmp_LIBDIR})
+
+find_path(GMPXX_INCLUDE_DIR gmpxx.h HINTS ${PKG_gmpxx_INCLUDEDIR})
+find_library(GMPXX_LIB gmpxx HINTS ${PKG_gmpxx_LIBDIR})
+
+# Remove these variables from cache inspector
+mark_as_advanced(GMP_INCLUDE_DIR GMP_LIB GMPXX_INCLUDE_DIR GMPXX_LIB)
+
+# Report if package was found
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(GMP
+ DEFAULT_MSG
+ GMPXX_LIB GMPXX_INCLUDE_DIR GMP_INCLUDE_DIR GMP_LIB
+)
+
+# Set targets
+if(GMP_FOUND)
+ # C library
+ if(NOT TARGET GMP::gmp)
+ add_library(GMP::gmp UNKNOWN IMPORTED)
+ set_target_properties(GMP::gmp PROPERTIES
+ IMPORTED_LOCATION ${GMP_LIB}
+ INTERFACE_INCLUDE_DIRECTORIES ${GMP_INCLUDE_DIR}
+ )
+ endif()
+
+ # C++ library, which requires a link to the C library
+ if(NOT TARGET GMP::gmpxx)
+ add_library(GMP::gmpxx UNKNOWN IMPORTED)
+ set_target_properties(GMP::gmpxx PROPERTIES
+ IMPORTED_LOCATION ${GMPXX_LIB}
+ INTERFACE_INCLUDE_DIRECTORIES ${GMPXX_INCLUDE_DIR}
+ INTERFACE_LINK_LIBRARIES GMP::gmp
+ )
+ endif()
+endif()
--- /dev/null
+# .. cmake_module::
+#
+# Module that checks for inkscape
+#
+# Sets the following variables
+#
+# :code:`INKSCAPE_FOUND`
+# Whether inkscape was found
+#
+# :code:`INKSCAPE`
+# Path to inkscape to generate .png files from .svg files
+#
+
+find_program(INKSCAPE inkscape DOC "Path to inkscape to generate png files from svg files")
+find_program(CONVERT convert DOC "Path to convert program")
+if(INKSCAPE)
+ set(INKSCAPE_FOUND True)
+ # check for inkscape >= 1.0
+ execute_process(COMMAND ${INKSCAPE} -z -e OUTPUT_QUIET ERROR_QUIET RESULT_VARIABLE INKSCAPE_RETURNED_ONE)
+ # if an error (i.e. 1) was returned, we have a new inkscape version (>= 1.0)
+ if(INKSCAPE_RETURNED_ONE)
+ set(INKSCAPE_NEW_VERSION True)
+ endif()
+
+endif(INKSCAPE)
+
+# text for feature summary
+set_package_properties("Inkscape" PROPERTIES
+ DESCRIPTION "converts SVG images"
+ URL "www.inkscape.org"
+ PURPOSE "To generate the documentation with LaTeX")
--- /dev/null
+# Find module for LatexMk
+#
+# This module honors the following input variables:
+# LATEXMK_ROOT
+# Directory to take the latexmk executable from
+# LATEXMK_DIR
+# Alternative variable instead of LATEXMK_ROOT
+#
+# The module checks for the presence of the LatexMk executable
+# and sets the following variables:
+#
+# LATEXMK_FOUND
+# Whether the latexmk executable was found on the system
+# LATEXMK_EXECUTABLE
+# The full path of the found latexmk executable
+# LATEXMK_VERSION_STRING
+# A human-readable string of the latexmk version.
+# LATEXMK_VERSION_MAJOR
+# The major version of the latexmk executable
+# LATEXMK_VERSION_MINOR
+# The minor version of the latexmk executable
+#
+# Copyright (c) 2017, Dominic Kempf, Steffen Müthing
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice, this
+# list of conditions and the following disclaimer in the documentation and/or
+# other materials provided with the distribution.
+#
+# * Neither the name of the Universität Heidelberg nor the names of its
+# contributors may be used to endorse or promote products derived from this
+# software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+# Find the actual program
+find_program(LATEXMK_EXECUTABLE
+ latexmk
+ PATHS ${LATEXMK_ROOT}
+ ${LATEXMK_DIR}
+ )
+
+# If found, figure out a version
+if(LATEXMK_EXECUTABLE)
+ execute_process(COMMAND ${LATEXMK_EXECUTABLE} --version
+ OUTPUT_VARIABLE LATEXMK_VERSION_LINE
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+ )
+ string(REGEX MATCH "Version.*$" LATEXMK_VERSION_STRING "${LATEXMK_VERSION_LINE}")
+ string(REGEX REPLACE "([0-9]+)\\." "\\1" LATEXMK_VERSION_MINOR "${LATEXMK_VERSION_STRING}")
+ string(REGEX REPLACE "[0-9]+\\.([0-9a-z]+)" "\\1" LATEXMK_VERSION_MAJOR "${LATEXMK_VERSION_STRING}")
+endif()
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(LatexMk
+ FOUND_VAR LATEXMK_FOUND
+ REQUIRED_VARS LATEXMK_EXECUTABLE
+ VERSION_VAR LATEXMK_VERSION_STRING)
--- /dev/null
+#[=======================================================================[.rst:
+FindMETIS
+---------
+
+Find Serial Graph Partitioning library METIS
+(see http://glaros.dtc.umn.edu/gkhome/metis/metis/overview)
+
+Imported targets
+^^^^^^^^^^^^^^^^
+
+This module defines the following :prop_tgt:`IMPORTED` target:
+
+``METIS::METIS``
+ The libraries, flags, and includes to use for METIS, if found.
+
+Result Variables
+^^^^^^^^^^^^^^^^
+
+This module defines the following variables:
+
+``METIS_FOUND``
+ The METIS library with all its dependencies is found
+
+Cache Variables
+^^^^^^^^^^^^^^^
+
+The following variables may be set to influence this module's behavior:
+
+``METIS_INCLUDE_DIR``
+ Include directory of METIS
+
+``METIS_LIBRARY``
+ Full path to the METIS library
+
+``METIS_API_VERSION``
+ This variable specifies the METIS API version provided by the scotch-metis library. This
+ is required for Scotch >= 6.0.7 versions if it is not detected automatically. The
+ variable may be set to 3 to indicate that scotch implements the METIS API v3 (default
+ for older Scotch versions), or it can be set to 5 to indicate that v5 of the METIS API
+ is provided. This variable corresponds to the preprocessor flag `SCOTCH_METIS_VERSION`
+ that is used when compiling Scotch from source.
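+
+Example
+^^^^^^^
+
+A minimal usage sketch (the consuming target ``mypartitioner`` is hypothetical):
+
+.. code-block:: cmake
+
+  find_package(METIS)
+  if(METIS_FOUND)
+    target_link_libraries(mypartitioner PRIVATE METIS::METIS)
+  endif()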
+#]=======================================================================]
+
+# Text for feature summary
+include(FeatureSummary)
+set_package_properties("METIS" PROPERTIES
+ DESCRIPTION "Serial Graph Partitioning"
+)
+
+# The METIS API version provided by the METIS or scotch-metis library
+set(METIS_API_VERSION 0 CACHE STRING
+ "METIS API version provided by METIS or scotch-metis library")
+
+# Try to locate METIS header
+find_path(METIS_INCLUDE_DIR metis.h
+ PATH_SUFFIXES metis)
+
+# Determine version of METIS installation
+find_file(METIS_HEADER_FILE metis.h
+ PATHS ${METIS_INCLUDE_DIR}
+ NO_DEFAULT_PATH)
+if(METIS_HEADER_FILE)
+ file(READ "${METIS_HEADER_FILE}" metisheader)
+ string(REGEX REPLACE ".*#define METIS_VER_MAJOR[ ]+([0-9]+).*" "\\1"
+ METIS_MAJOR_VERSION "${metisheader}")
+ string(REGEX REPLACE ".*#define METIS_VER_MINOR[ ]+([0-9]+).*" "\\1"
+ METIS_MINOR_VERSION "${metisheader}")
+ if(METIS_MAJOR_VERSION GREATER_EQUAL 0 AND METIS_MINOR_VERSION GREATER_EQUAL 0)
+ set(METIS_VERSION "${METIS_MAJOR_VERSION}.${METIS_MINOR_VERSION}")
+
+ # Specify an api version to be used in config.h files or compile flags
+ if(NOT METIS_API_VERSION)
+ if(METIS_MAJOR_VERSION GREATER_EQUAL 3 AND METIS_MAJOR_VERSION LESS 5)
+ set(METIS_API_VERSION "3")
+ else()
+ set(METIS_API_VERSION "${METIS_MAJOR_VERSION}")
+ endif()
+ endif()
+ else()
+ unset(METIS_MAJOR_VERSION)
+ unset(METIS_MINOR_VERSION)
+ endif()
+
+ # test whether header file is actually the scotch-metis header
+ string(FIND "${metisheader}" "SCOTCH_METIS_PREFIX" IS_SCOTCH_METIS_HEADER)
+ if(IS_SCOTCH_METIS_HEADER EQUAL "-1")
+ set(IS_SCOTCH_METIS_HEADER FALSE)
+ else()
+ set(IS_SCOTCH_METIS_HEADER TRUE)
+ endif()
+endif()
+unset(METIS_HEADER_FILE CACHE)
+
+# search for the METIS library or for the scotch-metis wrapper library
+if(IS_SCOTCH_METIS_HEADER)
+ find_library(METIS_LIBRARY scotchmetis)
+else()
+ find_library(METIS_LIBRARY metis)
+endif()
+
+# We need to check whether we need to link m, copy the lazy solution
+# from FindBLAS and FindLAPACK here.
+if(METIS_LIBRARY AND NOT WIN32)
+ set(METIS_NEEDS_LIBM 1)
+endif()
+
+mark_as_advanced(METIS_INCLUDE_DIR METIS_LIBRARY METIS_NEEDS_LIBM METIS_API_VERSION)
+
+# If scotch is requested, find package PTScotch and check version compatibility:
+# Scotch provides the METIS-3 interface only in versions < 6.0.7, but provides an option to
+# select the API version in later Scotch releases.
+if(IS_SCOTCH_METIS_HEADER)
+ find_package(PTScotch)
+ set(HAVE_SCOTCH_METIS ${PTScotch_SCOTCH_FOUND})
+ if (PTScotch_SCOTCH_FOUND AND NOT METIS_API_VERSION)
+ if(PTScotch_VERSION VERSION_LESS "6.0.7")
+ set(METIS_API_VERSION "3")
+ else()
+ # try to figure out the METIS_API_VERSION by checking for symbols in the library
+ include(CheckSymbolExists)
+ include(CMakePushCheckState)
+ find_package(Threads)
+ cmake_push_check_state()
+ set(CMAKE_REQUIRED_LIBRARIES ${METIS_LIBRARY} ${SCOTCH_LIBRARY} ${SCOTCHERR_LIBRARY} ${CMAKE_THREAD_LIBS_INIT})
+ if(METIS_NEEDS_LIBM)
+ list(APPEND CMAKE_REQUIRED_LIBRARIES m)
+ endif()
+ set(CMAKE_REQUIRED_INCLUDES ${METIS_INCLUDE_DIR} ${SCOTCH_INCLUDE_DIR})
+
+ set(CMAKE_REQUIRED_DEFINITIONS "-DSCOTCH_METIS_VERSION=3")
+ check_symbol_exists("METIS_PartGraphVKway" "stdio.h;stdint.h;scotch.h;metis.h" IS_SCOTCH_METIS_API_V3)
+ if(IS_SCOTCH_METIS_API_V3)
+ set(METIS_API_VERSION "3")
+ else()
+ set(CMAKE_REQUIRED_DEFINITIONS "-DSCOTCH_METIS_VERSION=5")
+ check_symbol_exists("METIS_PartGraphKway" "stdio.h;stdint.h;scotch.h;metis.h" IS_SCOTCH_METIS_API_V5)
+ if(IS_SCOTCH_METIS_API_V5)
+ set(METIS_API_VERSION "5")
+ endif()
+ endif()
+ cmake_pop_check_state()
+ endif()
+ endif()
+endif()
+
+# Behave like a CMake module is supposed to behave
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args("METIS"
+ REQUIRED_VARS
+ METIS_LIBRARY METIS_INCLUDE_DIR METIS_API_VERSION
+ VERSION_VAR
+ METIS_VERSION
+)
+
+# If both headers and library are found, create imported target
+if(METIS_FOUND AND NOT TARGET METIS::METIS)
+ add_library(METIS::METIS UNKNOWN IMPORTED)
+ set_target_properties(METIS::METIS PROPERTIES
+ IMPORTED_LOCATION ${METIS_LIBRARY}
+ INTERFACE_INCLUDE_DIRECTORIES ${METIS_INCLUDE_DIR}
+ INTERFACE_COMPILE_DEFINITIONS METIS_API_VERSION=${METIS_API_VERSION}
+ )
+
+ # Link against libm if needed
+ if(METIS_NEEDS_LIBM)
+ set_property(TARGET METIS::METIS APPEND PROPERTY
+ INTERFACE_LINK_LIBRARIES m)
+ endif()
+
+ # Link against the Scotch library if the scotch-metis wrapper is used
+ if(IS_SCOTCH_METIS_HEADER AND PTScotch_FOUND)
+ set_property(TARGET METIS::METIS APPEND PROPERTY
+ INTERFACE_LINK_LIBRARIES PTScotch::Scotch)
+ set_property(TARGET METIS::METIS APPEND PROPERTY
+ INTERFACE_COMPILE_DEFINITIONS
+ SCOTCH_METIS_VERSION=${METIS_API_VERSION})
+ endif()
+endif()
--- /dev/null
+#[=======================================================================[.rst:
+FindPTScotch
+------------
+
+Find the PTScotch library, i.e., the software package and libraries for
+sequential and parallel graph partitioning, static mapping and clustering,
+sequential mesh and hypergraph partitioning, and sequential and parallel
+sparse matrix block ordering.
+
+Components
+^^^^^^^^^^
+
+The PTScotch module supports searching for the following components:
+
+``SCOTCH``
+ Sequential version of Scotch
+``PTSCOTCH``
+ Parallel version of Scotch. Requires MPI.
+
+Imported targets
+^^^^^^^^^^^^^^^^
+
+This module defines the following :prop_tgt:`IMPORTED` targets:
+
+``PTScotch::Scotch``
+ The sequential Scotch library to link against
+``PTScotch::PTScotch``
+ The parallel PTScotch library to link against
+
+Result Variables
+^^^^^^^^^^^^^^^^
+
+This module defines the following variables:
+
+``PTScotch_FOUND``
+ The Scotch and/or PTScotch library with all its dependencies is found
+``PTScotch_SCOTCH_FOUND``
+ The sequential Scotch library is found
+``PTScotch_PTSCOTCH_FOUND``
+ The parallel PTScotch library is found
+``PTScotch_VERSION``
+ Version of Scotch that is found
+
+Cache Variables
+^^^^^^^^^^^^^^^
+
+The following variables may be set to influence this module's behavior:
+
+``PTSCOTCH_SUFFIX``
+ Scotch might be compiled using different integer sizes (int32, int64, long).
+ When this is set, the headers and libraries are searched under the suffixes
+ :code:`include/scotch-${PTSCOTCH_SUFFIX}` and :code:`lib/scotch-${PTSCOTCH_SUFFIX}`,
+ respectively.
+
+``SCOTCH_INCLUDE_DIR``
+ Include directory where scotch.h is found.
+
+``PTSCOTCH_INCLUDE_DIR``
+ Include directory where ptscotch.h is found.
+
+``SCOTCH_LIBRARY`` and ``SCOTCHERR_LIBRARY``
+ Full path to the scotch library
+
+``PTSCOTCH_LIBRARY`` and ``PTSCOTCHERR_LIBRARY``
+ Full path to the ptscotch library
+
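+A minimal usage sketch, where the consuming target name ``mysolver`` is a
+hypothetical placeholder; the parallel component might be requested and
+consumed like this::
+
+  find_package(PTScotch COMPONENTS PTSCOTCH)
+  if(PTScotch_PTSCOTCH_FOUND)
+    target_link_libraries(mysolver PRIVATE PTScotch::PTScotch)
+  endif()
+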
+#]=======================================================================]
+
+# text for feature summary
+include(FeatureSummary)
+set_package_properties("PTScotch" PROPERTIES
+ DESCRIPTION "Sequential and Parallel Graph Partitioning"
+)
+
+# find dependency for PTScotch
+include(CMakeFindDependencyMacro)
+find_package(MPI QUIET)
+
+# search directory might have the PATH_SUFFIX scotch-SUFFIX
+if(PTSCOTCH_SUFFIX)
+ set(PATH_SUFFIXES "scotch-${PTSCOTCH_SUFFIX}")
+else()
+ set(PATH_SUFFIXES "scotch")
+endif()
+
+# Try to find the include files
+find_path(SCOTCH_INCLUDE_DIR scotch.h
+ PATH_SUFFIXES ${PATH_SUFFIXES})
+
+find_path(PTSCOTCH_INCLUDE_DIR ptscotch.h
+ HINTS ${SCOTCH_INCLUDE_DIR}
+ PATH_SUFFIXES ${PATH_SUFFIXES})
+
+# Try to find the (pt)scotch libraries
+find_library(SCOTCH_LIBRARY scotch)
+find_library(SCOTCHERR_LIBRARY scotcherr)
+find_library(PTSCOTCH_LIBRARY ptscotch)
+find_library(PTSCOTCHERR_LIBRARY ptscotcherr)
+
+mark_as_advanced(SCOTCH_INCLUDE_DIR SCOTCH_LIBRARY SCOTCHERR_LIBRARY
+ PTSCOTCH_INCLUDE_DIR PTSCOTCH_LIBRARY PTSCOTCHERR_LIBRARY)
+
+# check version of (PT)Scotch
+find_file(SCOTCH_HEADER "scotch.h"
+ HINTS ${SCOTCH_INCLUDE_DIR}
+ NO_DEFAULT_PATH)
+if(SCOTCH_HEADER)
+ file(READ "${SCOTCH_HEADER}" scotchheader)
+ string(REGEX REPLACE ".*#define SCOTCH_VERSION[ ]+([0-9]+).*" "\\1"
+ SCOTCH_MAJOR_VERSION "${scotchheader}")
+ string(REGEX REPLACE ".*#define SCOTCH_RELEASE[ ]+([0-9]+).*" "\\1"
+ SCOTCH_MINOR_VERSION "${scotchheader}")
+ string(REGEX REPLACE ".*#define SCOTCH_PATCHLEVEL[ ]+([0-9]+).*" "\\1"
+ SCOTCH_PREFIX_VERSION "${scotchheader}")
+ if(SCOTCH_MAJOR_VERSION GREATER_EQUAL 0)
+ set(PTScotch_VERSION "${SCOTCH_MAJOR_VERSION}")
+ endif()
+ if (SCOTCH_MINOR_VERSION GREATER_EQUAL 0)
+ set(PTScotch_VERSION "${PTScotch_VERSION}.${SCOTCH_MINOR_VERSION}")
+ endif()
+ if (SCOTCH_PREFIX_VERSION GREATER_EQUAL 0)
+ set(PTScotch_VERSION "${PTScotch_VERSION}.${SCOTCH_PREFIX_VERSION}")
+ endif()
+endif()
+unset(SCOTCH_HEADER CACHE)
+
+# set if (PT)Scotch components found
+if (SCOTCH_INCLUDE_DIR AND SCOTCH_LIBRARY AND SCOTCHERR_LIBRARY)
+ set(PTScotch_SCOTCH_FOUND TRUE)
+endif ()
+
+if (PTSCOTCH_INCLUDE_DIR AND PTSCOTCH_LIBRARY AND PTSCOTCHERR_LIBRARY AND MPI_FOUND)
+ set(PTScotch_PTSCOTCH_FOUND TRUE)
+endif ()
+
+# dependencies between components
+if (NOT PTScotch_SCOTCH_FOUND)
+ set(PTScotch_PTSCOTCH_FOUND FALSE)
+endif ()
+
+# behave like a CMake module is supposed to behave
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args("PTScotch"
+ REQUIRED_VARS
+ SCOTCH_LIBRARY SCOTCHERR_LIBRARY SCOTCH_INCLUDE_DIR
+ VERSION_VAR
+ PTScotch_VERSION
+ HANDLE_COMPONENTS
+)
+
+if(PTScotch_FOUND)
+ # Define an imported target for the sequential Scotch library
+ if(PTScotch_SCOTCH_FOUND AND NOT TARGET PTScotch::Scotch)
+ add_library(PTScotch::Scotch UNKNOWN IMPORTED)
+ set_target_properties(PTScotch::Scotch PROPERTIES
+ IMPORTED_LOCATION ${SCOTCH_LIBRARY}
+ INTERFACE_INCLUDE_DIRECTORIES ${SCOTCH_INCLUDE_DIR}
+ INTERFACE_LINK_LIBRARIES ${SCOTCHERR_LIBRARY}
+ )
+ endif()
+
+ # Define an imported target for the parallel PTScotch library
+ if(PTScotch_SCOTCH_FOUND AND PTScotch_PTSCOTCH_FOUND AND NOT TARGET PTScotch::PTScotch)
+ add_library(PTScotch::PTScotch UNKNOWN IMPORTED)
+ set_target_properties(PTScotch::PTScotch PROPERTIES
+ IMPORTED_LOCATION ${PTSCOTCH_LIBRARY}
+ INTERFACE_INCLUDE_DIRECTORIES ${PTSCOTCH_INCLUDE_DIR}
+ INTERFACE_LINK_LIBRARIES ${PTSCOTCHERR_LIBRARY}
+ )
+ target_link_libraries(PTScotch::PTScotch
+ INTERFACE PTScotch::Scotch MPI::MPI_C)
+ endif()
+endif()
--- /dev/null
+#[=======================================================================[.rst:
+FindParMETIS
+------------
+
+Find Parallel Graph Partitioning library ParMETIS
+(see http://glaros.dtc.umn.edu/gkhome/metis/parmetis/overview)
+
+Imported targets
+^^^^^^^^^^^^^^^^
+
+This module defines the following :prop_tgt:`IMPORTED` target:
+
+``ParMETIS::ParMETIS``
+ The libraries, flags, and includes to use for ParMETIS, if found.
+
+Result Variables
+^^^^^^^^^^^^^^^^
+
+This module defines the following variables:
+
+``ParMETIS_FOUND``
+ The ParMETIS library with all its dependencies is found
+
+Cache Variables
+^^^^^^^^^^^^^^^
+
+The following variables may be set to influence this module's behavior:
+
+``PARMETIS_INCLUDE_DIR``
+ Include directory where parmetis.h is found.
+
+``PARMETIS_LIBRARY``
+ Full path to the ParMETIS library
+
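+A minimal usage sketch, where the consuming target name ``mypartitioner`` is a
+hypothetical placeholder::
+
+  find_package(ParMETIS)
+  if(ParMETIS_FOUND)
+    target_link_libraries(mypartitioner PRIVATE ParMETIS::ParMETIS)
+  endif()
+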
+#]=======================================================================]
+
+# text for feature summary
+include(FeatureSummary)
+set_package_properties("ParMETIS" PROPERTIES
+ DESCRIPTION "Parallel Graph Partitioning"
+)
+
+find_path(PARMETIS_INCLUDE_DIR parmetis.h
+ PATH_SUFFIXES parmetis)
+
+# determine version of ParMETIS installation
+find_file(PARMETIS_HEADER_FILE parmetis.h
+ PATHS ${PARMETIS_INCLUDE_DIR}
+ NO_DEFAULT_PATH)
+if(PARMETIS_HEADER_FILE)
+ file(READ "${PARMETIS_HEADER_FILE}" parmetisheader)
+ string(REGEX REPLACE ".*#define PARMETIS_MAJOR_VERSION[ ]+([0-9]+).*" "\\1"
+ ParMETIS_MAJOR_VERSION "${parmetisheader}")
+ string(REGEX REPLACE ".*#define PARMETIS_MINOR_VERSION[ ]+([0-9]+).*" "\\1"
+ ParMETIS_MINOR_VERSION "${parmetisheader}")
+ if(ParMETIS_MAJOR_VERSION GREATER_EQUAL 0 AND ParMETIS_MINOR_VERSION GREATER_EQUAL 0)
+ set(ParMETIS_VERSION "${ParMETIS_MAJOR_VERSION}.${ParMETIS_MINOR_VERSION}")
+ endif()
+
+ # test whether header file is actually the ptscotch-parmetis header
+ string(FIND "${parmetisheader}" "SCOTCH_METIS_PREFIX" IS_PTSCOTCH_PARMETIS_HEADER)
+ if(IS_PTSCOTCH_PARMETIS_HEADER EQUAL "-1")
+ set(IS_PTSCOTCH_PARMETIS_HEADER FALSE)
+ else()
+ set(IS_PTSCOTCH_PARMETIS_HEADER TRUE)
+ endif()
+endif()
+unset(PARMETIS_HEADER_FILE CACHE)
+
+
+# search ParMETIS library
+if(IS_PTSCOTCH_PARMETIS_HEADER)
+ find_library(PARMETIS_LIBRARY ptscotchparmetis)
+else()
+ find_library(PARMETIS_LIBRARY parmetis)
+endif()
+
+mark_as_advanced(PARMETIS_INCLUDE_DIR PARMETIS_LIBRARY)
+
+# ParMETIS >= 4.0 requires at least METIS version 5.0
+if(ParMETIS_VERSION VERSION_GREATER_EQUAL "4.0")
+ set(METIS_MIN_VERSION "5.0")
+endif()
+
+# find package dependencies first
+find_package(METIS ${METIS_MIN_VERSION})
+find_package(MPI COMPONENTS C)
+
+# set a list of required dependencies for ParMETIS
+set(PARMETIS_DEPENDENCIES METIS_FOUND MPI_FOUND)
+
+# If ptscotch-parmetis is requested, find package PTScotch
+if(IS_PTSCOTCH_PARMETIS_HEADER)
+ find_package(PTScotch)
+ set(HAVE_PTSCOTCH_PARMETIS ${PTScotch_PTSCOTCH_FOUND})
+ list(APPEND PARMETIS_DEPENDENCIES PTScotch_PTSCOTCH_FOUND)
+endif()
+
+# behave like a CMake module is supposed to behave
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args("ParMETIS"
+ REQUIRED_VARS
+ PARMETIS_LIBRARY PARMETIS_INCLUDE_DIR ${PARMETIS_DEPENDENCIES}
+ VERSION_VAR
+ ParMETIS_VERSION
+)
+
+# create imported target ParMETIS::ParMETIS
+if(PARMETIS_FOUND AND NOT TARGET ParMETIS::ParMETIS)
+ add_library(ParMETIS::ParMETIS UNKNOWN IMPORTED)
+ set_target_properties(ParMETIS::ParMETIS PROPERTIES
+ IMPORTED_LOCATION ${PARMETIS_LIBRARY}
+ INTERFACE_INCLUDE_DIRECTORIES ${PARMETIS_INCLUDE_DIR}
+ INTERFACE_LINK_LIBRARIES "METIS::METIS;MPI::MPI_C"
+ INTERFACE_COMPILE_DEFINITIONS "MPICH_SKIP_MPICXX;OMPI_SKIP_MPICXX"
+ )
+
+ # link against PTScotch if needed
+ if(IS_PTSCOTCH_PARMETIS_HEADER AND PTScotch_PTSCOTCH_FOUND)
+ set_property(TARGET ParMETIS::ParMETIS APPEND PROPERTY
+ INTERFACE_LINK_LIBRARIES PTScotch::PTScotch)
+ endif()
+endif()
--- /dev/null
+install(FILES
+ FindPkgConfig.cmake
+ DESTINATION ${DUNE_INSTALL_MODULEDIR}/FindPkgConfig)
\ No newline at end of file
--- /dev/null
+# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+# file Copyright.txt or https://cmake.org/licensing for details.
+
+# Note: this is a backport of the cmake/Modules/FindPkgConfig.cmake file from CMake version 3.19.4
+
+#[========================================[.rst:
+FindPkgConfig
+-------------
+
+A ``pkg-config`` module for CMake.
+
+Finds the ``pkg-config`` executable and adds the :command:`pkg_get_variable`,
+:command:`pkg_check_modules` and :command:`pkg_search_module` commands. The
+following variables will also be set:
+
+``PKG_CONFIG_FOUND``
+ if pkg-config executable was found
+``PKG_CONFIG_EXECUTABLE``
+ pathname of the pkg-config program
+``PKG_CONFIG_VERSION_STRING``
+ version of pkg-config (since CMake 2.8.8)
+
+#]========================================]
+
+cmake_policy(PUSH)
+cmake_policy(SET CMP0054 NEW) # if() quoted variables not dereferenced
+cmake_policy(SET CMP0057 NEW) # if IN_LIST
+
+### Common stuff ####
+set(PKG_CONFIG_VERSION 1)
+
+# find pkg-config, use PKG_CONFIG if set
+if((NOT PKG_CONFIG_EXECUTABLE) AND (NOT "$ENV{PKG_CONFIG}" STREQUAL ""))
+ set(PKG_CONFIG_EXECUTABLE "$ENV{PKG_CONFIG}" CACHE FILEPATH "pkg-config executable")
+endif()
+
+set(PKG_CONFIG_NAMES "pkg-config")
+if(CMAKE_HOST_WIN32)
+ list(PREPEND PKG_CONFIG_NAMES "pkg-config.bat")
+endif()
+
+find_program(PKG_CONFIG_EXECUTABLE NAMES ${PKG_CONFIG_NAMES} DOC "pkg-config executable")
+mark_as_advanced(PKG_CONFIG_EXECUTABLE)
+
+set(_PKG_CONFIG_FAILURE_MESSAGE "")
+if (PKG_CONFIG_EXECUTABLE)
+ execute_process(COMMAND ${PKG_CONFIG_EXECUTABLE} --version
+ OUTPUT_VARIABLE PKG_CONFIG_VERSION_STRING OUTPUT_STRIP_TRAILING_WHITESPACE
+ ERROR_VARIABLE _PKG_CONFIG_VERSION_ERROR ERROR_STRIP_TRAILING_WHITESPACE
+ RESULT_VARIABLE _PKG_CONFIG_VERSION_RESULT
+ )
+
+ if (NOT _PKG_CONFIG_VERSION_RESULT EQUAL 0)
+ string(REPLACE "\n" "\n " _PKG_CONFIG_VERSION_ERROR " ${_PKG_CONFIG_VERSION_ERROR}")
+ string(APPEND _PKG_CONFIG_FAILURE_MESSAGE
+ "The command\n"
+ " \"${PKG_CONFIG_EXECUTABLE}\" --version\n"
+ " failed with output:\n${PKG_CONFIG_VERSION_STRING}\n"
+ " stderr: \n${_PKG_CONFIG_VERSION_ERROR}\n"
+ " result: \n${_PKG_CONFIG_VERSION_RESULT}"
+ )
+ set(PKG_CONFIG_EXECUTABLE "")
+ unset(PKG_CONFIG_VERSION_STRING)
+ endif ()
+ unset(_PKG_CONFIG_VERSION_RESULT)
+endif ()
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(PkgConfig
+ REQUIRED_VARS PKG_CONFIG_EXECUTABLE
+ FAIL_MESSAGE "${_PKG_CONFIG_FAILURE_MESSAGE}"
+ VERSION_VAR PKG_CONFIG_VERSION_STRING)
+
+# This is needed because the module name is "PkgConfig" but the name of
+# this variable has always been PKG_CONFIG_FOUND so this isn't automatically
+# handled by FPHSA.
+set(PKG_CONFIG_FOUND "${PKGCONFIG_FOUND}")
+
+# Unsets the given variable
+macro(_pkgconfig_unset var)
+ set(${var} "" CACHE INTERNAL "")
+endmacro()
+
+macro(_pkgconfig_set var value)
+ set(${var} ${value} CACHE INTERNAL "")
+endmacro()
+
+# Invokes pkgconfig, cleans up the result and sets variables
+macro(_pkgconfig_invoke _pkglist _prefix _varname _regexp)
+ set(_pkgconfig_invoke_result)
+
+ execute_process(
+ COMMAND ${PKG_CONFIG_EXECUTABLE} ${ARGN} ${_pkglist}
+ OUTPUT_VARIABLE _pkgconfig_invoke_result
+ RESULT_VARIABLE _pkgconfig_failed
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+
+ if (_pkgconfig_failed)
+ set(_pkgconfig_${_varname} "")
+ _pkgconfig_unset(${_prefix}_${_varname})
+ else()
+ string(REGEX REPLACE "[\r\n]" " " _pkgconfig_invoke_result "${_pkgconfig_invoke_result}")
+
+ if (NOT ${_regexp} STREQUAL "")
+ string(REGEX REPLACE "${_regexp}" " " _pkgconfig_invoke_result "${_pkgconfig_invoke_result}")
+ endif()
+
+ separate_arguments(_pkgconfig_invoke_result)
+
+ #message(STATUS " ${_varname} ... ${_pkgconfig_invoke_result}")
+ set(_pkgconfig_${_varname} ${_pkgconfig_invoke_result})
+ _pkgconfig_set(${_prefix}_${_varname} "${_pkgconfig_invoke_result}")
+ endif()
+endmacro()
+
+# Internal version of pkg_get_variable; expects PKG_CONFIG_PATH to already be set
+function (_pkg_get_variable result pkg variable)
+ _pkgconfig_invoke("${pkg}" "prefix" "result" "" "--variable=${variable}")
+ set("${result}"
+ "${prefix_result}"
+ PARENT_SCOPE)
+endfunction ()
+
+# Invokes pkgconfig two times; once without '--static' and once with
+# '--static'
+macro(_pkgconfig_invoke_dyn _pkglist _prefix _varname cleanup_regexp)
+ _pkgconfig_invoke("${_pkglist}" ${_prefix} ${_varname} "${cleanup_regexp}" ${ARGN})
+ _pkgconfig_invoke("${_pkglist}" ${_prefix} STATIC_${_varname} "${cleanup_regexp}" --static ${ARGN})
+endmacro()
+
+# Splits given arguments into options and a package list
+macro(_pkgconfig_parse_options _result _is_req _is_silent _no_cmake_path _no_cmake_environment_path _imp_target _imp_target_global)
+ set(${_is_req} 0)
+ set(${_is_silent} 0)
+ set(${_no_cmake_path} 0)
+ set(${_no_cmake_environment_path} 0)
+ set(${_imp_target} 0)
+ set(${_imp_target_global} 0)
+ if(DEFINED PKG_CONFIG_USE_CMAKE_PREFIX_PATH)
+ if(NOT PKG_CONFIG_USE_CMAKE_PREFIX_PATH)
+ set(${_no_cmake_path} 1)
+ set(${_no_cmake_environment_path} 1)
+ endif()
+ elseif(CMAKE_MINIMUM_REQUIRED_VERSION VERSION_LESS 3.1)
+ set(${_no_cmake_path} 1)
+ set(${_no_cmake_environment_path} 1)
+ endif()
+
+ foreach(_pkg ${ARGN})
+ if (_pkg STREQUAL "REQUIRED")
+ set(${_is_req} 1)
+ endif ()
+ if (_pkg STREQUAL "QUIET")
+ set(${_is_silent} 1)
+ endif ()
+ if (_pkg STREQUAL "NO_CMAKE_PATH")
+ set(${_no_cmake_path} 1)
+ endif()
+ if (_pkg STREQUAL "NO_CMAKE_ENVIRONMENT_PATH")
+ set(${_no_cmake_environment_path} 1)
+ endif()
+ if (_pkg STREQUAL "IMPORTED_TARGET")
+ set(${_imp_target} 1)
+ endif()
+ if (_pkg STREQUAL "GLOBAL")
+ set(${_imp_target_global} 1)
+ endif()
+ endforeach()
+
+ if (${_imp_target_global} AND NOT ${_imp_target})
+ message(SEND_ERROR "the argument GLOBAL may only be used together with IMPORTED_TARGET")
+ endif()
+
+ set(${_result} ${ARGN})
+ list(REMOVE_ITEM ${_result} "REQUIRED")
+ list(REMOVE_ITEM ${_result} "QUIET")
+ list(REMOVE_ITEM ${_result} "NO_CMAKE_PATH")
+ list(REMOVE_ITEM ${_result} "NO_CMAKE_ENVIRONMENT_PATH")
+ list(REMOVE_ITEM ${_result} "IMPORTED_TARGET")
+ list(REMOVE_ITEM ${_result} "GLOBAL")
+endmacro()
+
+# Add the content of a variable or an environment variable to a list of
+# paths
+# Usage:
+# - _pkgconfig_add_extra_path(_extra_paths VAR)
+# - _pkgconfig_add_extra_path(_extra_paths ENV VAR)
+function(_pkgconfig_add_extra_path _extra_paths_var _var)
+ set(_is_env 0)
+ if(ARGC GREATER 2 AND _var STREQUAL "ENV")
+ set(_var ${ARGV2})
+ set(_is_env 1)
+ endif()
+ if(NOT _is_env)
+ if(NOT "${${_var}}" STREQUAL "")
+ list(APPEND ${_extra_paths_var} ${${_var}})
+ endif()
+ else()
+ if(NOT "$ENV{${_var}}" STREQUAL "")
+ file(TO_CMAKE_PATH "$ENV{${_var}}" _path)
+ list(APPEND ${_extra_paths_var} ${_path})
+ unset(_path)
+ endif()
+ endif()
+ set(${_extra_paths_var} ${${_extra_paths_var}} PARENT_SCOPE)
+endfunction()
+
+# scan the LDFLAGS returned by pkg-config for library directories and
+# libraries, and figure out the absolute paths of those libraries in the
+# given directories
+function(_pkg_find_libs _prefix _no_cmake_path _no_cmake_environment_path)
+ unset(_libs)
+ unset(_find_opts)
+
+ # set the options that are used as long as the .pc file does not provide a library
+ # path to look into
+ if(_no_cmake_path)
+ list(APPEND _find_opts "NO_CMAKE_PATH")
+ endif()
+ if(_no_cmake_environment_path)
+ list(APPEND _find_opts "NO_CMAKE_ENVIRONMENT_PATH")
+ endif()
+
+ unset(_search_paths)
+ unset(_next_is_framework)
+ foreach (flag IN LISTS ${_prefix}_LDFLAGS)
+ if (_next_is_framework)
+ list(APPEND _libs "-framework ${flag}")
+ unset(_next_is_framework)
+ continue()
+ endif ()
+ if (flag MATCHES "^-L(.*)")
+ list(APPEND _search_paths ${CMAKE_MATCH_1})
+ continue()
+ endif()
+ if (flag MATCHES "^-l(.*)")
+ set(_pkg_search "${CMAKE_MATCH_1}")
+ else()
+ if (flag STREQUAL "-framework")
+ set(_next_is_framework TRUE)
+ endif ()
+ continue()
+ endif()
+
+ if(_search_paths)
+ # Firstly search in -L paths
+ find_library(pkgcfg_lib_${_prefix}_${_pkg_search}
+ NAMES ${_pkg_search}
+ HINTS ${_search_paths} NO_DEFAULT_PATH)
+ endif()
+ find_library(pkgcfg_lib_${_prefix}_${_pkg_search}
+ NAMES ${_pkg_search}
+ ${_find_opts})
+ mark_as_advanced(pkgcfg_lib_${_prefix}_${_pkg_search})
+ if(pkgcfg_lib_${_prefix}_${_pkg_search})
+ list(APPEND _libs "${pkgcfg_lib_${_prefix}_${_pkg_search}}")
+ else()
+ list(APPEND _libs ${_pkg_search})
+ endif()
+ endforeach()
+
+ set(${_prefix}_LINK_LIBRARIES "${_libs}" PARENT_SCOPE)
+endfunction()
+
+# create an imported target from all the information returned by pkg-config
+function(_pkg_create_imp_target _prefix _imp_target_global)
+ # only create the target if it is linkable, i.e. no executables
+ if (NOT TARGET PkgConfig::${_prefix}
+ AND ( ${_prefix}_INCLUDE_DIRS OR ${_prefix}_LINK_LIBRARIES OR ${_prefix}_LDFLAGS_OTHER OR ${_prefix}_CFLAGS_OTHER ))
+ if(${_imp_target_global})
+ set(_global_opt "GLOBAL")
+ else()
+ unset(_global_opt)
+ endif()
+ add_library(PkgConfig::${_prefix} INTERFACE IMPORTED ${_global_opt})
+
+ if(${_prefix}_INCLUDE_DIRS)
+ set_property(TARGET PkgConfig::${_prefix} PROPERTY
+ INTERFACE_INCLUDE_DIRECTORIES "${${_prefix}_INCLUDE_DIRS}")
+ endif()
+ if(${_prefix}_LINK_LIBRARIES)
+ set_property(TARGET PkgConfig::${_prefix} PROPERTY
+ INTERFACE_LINK_LIBRARIES "${${_prefix}_LINK_LIBRARIES}")
+ endif()
+ if(${_prefix}_LDFLAGS_OTHER)
+ set_property(TARGET PkgConfig::${_prefix} PROPERTY
+ INTERFACE_LINK_OPTIONS "${${_prefix}_LDFLAGS_OTHER}")
+ endif()
+ if(${_prefix}_CFLAGS_OTHER)
+ set_property(TARGET PkgConfig::${_prefix} PROPERTY
+ INTERFACE_COMPILE_OPTIONS "${${_prefix}_CFLAGS_OTHER}")
+ endif()
+ endif()
+endfunction()
+
+# recalculate the dynamic output
+# this is a macro and not a function so the result of _pkg_find_libs is automatically propagated
+macro(_pkg_recalculate _prefix _no_cmake_path _no_cmake_environment_path _imp_target _imp_target_global)
+ _pkg_find_libs(${_prefix} ${_no_cmake_path} ${_no_cmake_environment_path})
+ if(${_imp_target})
+ _pkg_create_imp_target(${_prefix} ${_imp_target_global})
+ endif()
+endmacro()
+
+###
+macro(_pkg_set_path_internal)
+ set(_extra_paths)
+
+ if(NOT _no_cmake_path)
+ _pkgconfig_add_extra_path(_extra_paths CMAKE_PREFIX_PATH)
+ _pkgconfig_add_extra_path(_extra_paths CMAKE_FRAMEWORK_PATH)
+ _pkgconfig_add_extra_path(_extra_paths CMAKE_APPBUNDLE_PATH)
+ endif()
+
+ if(NOT _no_cmake_environment_path)
+ _pkgconfig_add_extra_path(_extra_paths ENV CMAKE_PREFIX_PATH)
+ _pkgconfig_add_extra_path(_extra_paths ENV CMAKE_FRAMEWORK_PATH)
+ _pkgconfig_add_extra_path(_extra_paths ENV CMAKE_APPBUNDLE_PATH)
+ endif()
+
+ if(NOT _extra_paths STREQUAL "")
+ # Save the PKG_CONFIG_PATH environment variable, and add paths
+ # from the CMAKE_PREFIX_PATH variables
+ set(_pkgconfig_path_old "$ENV{PKG_CONFIG_PATH}")
+ set(_pkgconfig_path "${_pkgconfig_path_old}")
+ if(NOT _pkgconfig_path STREQUAL "")
+ file(TO_CMAKE_PATH "${_pkgconfig_path}" _pkgconfig_path)
+ endif()
+
+ # Create a list of the possible pkgconfig subfolders (depending on
+ # the system)
+ set(_lib_dirs)
+ if(NOT DEFINED CMAKE_SYSTEM_NAME
+ OR (CMAKE_SYSTEM_NAME MATCHES "^(Linux|kFreeBSD|GNU)$"
+ AND NOT CMAKE_CROSSCOMPILING))
+ if(EXISTS "/etc/debian_version") # is this a debian system ?
+ if(CMAKE_LIBRARY_ARCHITECTURE)
+ list(APPEND _lib_dirs "lib/${CMAKE_LIBRARY_ARCHITECTURE}/pkgconfig")
+ endif()
+ else()
+ # not debian, check the FIND_LIBRARY_USE_LIB32_PATHS and FIND_LIBRARY_USE_LIB64_PATHS properties
+ get_property(uselib32 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB32_PATHS)
+ if(uselib32 AND CMAKE_SIZEOF_VOID_P EQUAL 4)
+ list(APPEND _lib_dirs "lib32/pkgconfig")
+ endif()
+ get_property(uselib64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
+ if(uselib64 AND CMAKE_SIZEOF_VOID_P EQUAL 8)
+ list(APPEND _lib_dirs "lib64/pkgconfig")
+ endif()
+ get_property(uselibx32 GLOBAL PROPERTY FIND_LIBRARY_USE_LIBX32_PATHS)
+ if(uselibx32 AND CMAKE_INTERNAL_PLATFORM_ABI STREQUAL "ELF X32")
+ list(APPEND _lib_dirs "libx32/pkgconfig")
+ endif()
+ endif()
+ endif()
+ if(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD" AND NOT CMAKE_CROSSCOMPILING)
+ list(APPEND _lib_dirs "libdata/pkgconfig")
+ endif()
+ list(APPEND _lib_dirs "lib/pkgconfig")
+ list(APPEND _lib_dirs "share/pkgconfig")
+
+ # Check if the directories exist and, if so, append them to the
+ # pkgconfig path list
+ foreach(_prefix_dir ${_extra_paths})
+ foreach(_lib_dir ${_lib_dirs})
+ if(EXISTS "${_prefix_dir}/${_lib_dir}")
+ list(APPEND _pkgconfig_path "${_prefix_dir}/${_lib_dir}")
+ list(REMOVE_DUPLICATES _pkgconfig_path)
+ endif()
+ endforeach()
+ endforeach()
+
+ # Prepare and set the environment variable
+ if(NOT _pkgconfig_path STREQUAL "")
+ # remove empty values from the list
+ list(REMOVE_ITEM _pkgconfig_path "")
+ file(TO_NATIVE_PATH "${_pkgconfig_path}" _pkgconfig_path)
+ if(CMAKE_HOST_UNIX)
+ string(REPLACE ";" ":" _pkgconfig_path "${_pkgconfig_path}")
+ string(REPLACE "\\ " " " _pkgconfig_path "${_pkgconfig_path}")
+ endif()
+ set(ENV{PKG_CONFIG_PATH} "${_pkgconfig_path}")
+ endif()
+
+ # Unset variables
+ unset(_lib_dirs)
+ unset(_pkgconfig_path)
+ endif()
+endmacro()
+
+macro(_pkg_restore_path_internal)
+ if(NOT _extra_paths STREQUAL "")
+ # Restore the environment variable
+ set(ENV{PKG_CONFIG_PATH} "${_pkgconfig_path_old}")
+ endif()
+
+ unset(_extra_paths)
+ unset(_pkgconfig_path_old)
+endmacro()
+
+# pkg-config returns frameworks in --libs-only-other
+# they need to be in ${_prefix}_LIBRARIES so that "-framework a -framework b" is
+# not incorrectly combined into "-framework a b"
+function(_pkgconfig_extract_frameworks _prefix)
+ set(ldflags "${${_prefix}_LDFLAGS_OTHER}")
+ list(FIND ldflags "-framework" FR_POS)
+ list(LENGTH ldflags LD_LENGTH)
+
+ # reduce length by 1 as we need "-framework" and the next entry
+ math(EXPR LD_LENGTH "${LD_LENGTH} - 1")
+ while (FR_POS GREATER -1 AND LD_LENGTH GREATER FR_POS)
+ list(REMOVE_AT ldflags ${FR_POS})
+ list(GET ldflags ${FR_POS} HEAD)
+ list(REMOVE_AT ldflags ${FR_POS})
+ math(EXPR LD_LENGTH "${LD_LENGTH} - 2")
+
+ list(APPEND LIBS "-framework ${HEAD}")
+
+ list(FIND ldflags "-framework" FR_POS)
+ endwhile ()
+ set(${_prefix}_LIBRARIES ${${_prefix}_LIBRARIES} ${LIBS} PARENT_SCOPE)
+ set(${_prefix}_LDFLAGS_OTHER "${ldflags}" PARENT_SCOPE)
+endfunction()
+
+# pkg-config returns -isystem include directories in --cflags-only-other,
+# depending on the version and on whether there is a space between -isystem and
+# the actual path
+function(_pkgconfig_extract_isystem _prefix)
+ set(cflags "${${_prefix}_CFLAGS_OTHER}")
+ set(outflags "")
+ set(incdirs "${${_prefix}_INCLUDE_DIRS}")
+
+ set(next_is_isystem FALSE)
+ foreach (THING IN LISTS cflags)
+ # This may filter "-isystem -isystem". That would not work anyway,
+ # so let it happen.
+ if (THING STREQUAL "-isystem")
+ set(next_is_isystem TRUE)
+ continue()
+ endif ()
+ if (next_is_isystem)
+ set(next_is_isystem FALSE)
+ list(APPEND incdirs "${THING}")
+ elseif (THING MATCHES "^-isystem")
+ string(SUBSTRING "${THING}" 8 -1 THING)
+ list(APPEND incdirs "${THING}")
+ else ()
+ list(APPEND outflags "${THING}")
+ endif ()
+ endforeach ()
+ set(${_prefix}_CFLAGS_OTHER "${outflags}" PARENT_SCOPE)
+ set(${_prefix}_INCLUDE_DIRS "${incdirs}" PARENT_SCOPE)
+endfunction()
+
+###
+macro(_pkg_check_modules_internal _is_required _is_silent _no_cmake_path _no_cmake_environment_path _imp_target _imp_target_global _prefix)
+ _pkgconfig_unset(${_prefix}_FOUND)
+ _pkgconfig_unset(${_prefix}_VERSION)
+ _pkgconfig_unset(${_prefix}_PREFIX)
+ _pkgconfig_unset(${_prefix}_INCLUDEDIR)
+ _pkgconfig_unset(${_prefix}_LIBDIR)
+ _pkgconfig_unset(${_prefix}_MODULE_NAME)
+ _pkgconfig_unset(${_prefix}_LIBS)
+ _pkgconfig_unset(${_prefix}_LIBS_L)
+ _pkgconfig_unset(${_prefix}_LIBS_PATHS)
+ _pkgconfig_unset(${_prefix}_LIBS_OTHER)
+ _pkgconfig_unset(${_prefix}_CFLAGS)
+ _pkgconfig_unset(${_prefix}_CFLAGS_I)
+ _pkgconfig_unset(${_prefix}_CFLAGS_OTHER)
+ _pkgconfig_unset(${_prefix}_STATIC_LIBDIR)
+ _pkgconfig_unset(${_prefix}_STATIC_LIBS)
+ _pkgconfig_unset(${_prefix}_STATIC_LIBS_L)
+ _pkgconfig_unset(${_prefix}_STATIC_LIBS_PATHS)
+ _pkgconfig_unset(${_prefix}_STATIC_LIBS_OTHER)
+ _pkgconfig_unset(${_prefix}_STATIC_CFLAGS)
+ _pkgconfig_unset(${_prefix}_STATIC_CFLAGS_I)
+ _pkgconfig_unset(${_prefix}_STATIC_CFLAGS_OTHER)
+
+ # create a more easily addressable variable for the modules and compute its length
+ set(_pkg_check_modules_list ${ARGN})
+ list(LENGTH _pkg_check_modules_list _pkg_check_modules_cnt)
+
+ if(PKG_CONFIG_EXECUTABLE)
+ # print a status message naming the module(s) being checked
+ if (NOT ${_is_silent})
+ if (_pkg_check_modules_cnt EQUAL 1)
+ message(STATUS "Checking for module '${_pkg_check_modules_list}'")
+ else()
+ message(STATUS "Checking for modules '${_pkg_check_modules_list}'")
+ endif()
+ endif()
+
+ set(_pkg_check_modules_packages)
+ set(_pkg_check_modules_failed)
+
+ _pkg_set_path_internal()
+
+ # iterate through module list and check whether they exist and match the required version
+ foreach (_pkg_check_modules_pkg ${_pkg_check_modules_list})
+ set(_pkg_check_modules_exist_query)
+
+ # check whether version is given
+ if (_pkg_check_modules_pkg MATCHES "(.*[^><])(=|[><]=?)(.*)")
+ set(_pkg_check_modules_pkg_name "${CMAKE_MATCH_1}")
+ set(_pkg_check_modules_pkg_op "${CMAKE_MATCH_2}")
+ set(_pkg_check_modules_pkg_ver "${CMAKE_MATCH_3}")
+ else()
+ set(_pkg_check_modules_pkg_name "${_pkg_check_modules_pkg}")
+ set(_pkg_check_modules_pkg_op)
+ set(_pkg_check_modules_pkg_ver)
+ endif()
+
+ _pkgconfig_unset(${_prefix}_${_pkg_check_modules_pkg_name}_VERSION)
+ _pkgconfig_unset(${_prefix}_${_pkg_check_modules_pkg_name}_PREFIX)
+ _pkgconfig_unset(${_prefix}_${_pkg_check_modules_pkg_name}_INCLUDEDIR)
+ _pkgconfig_unset(${_prefix}_${_pkg_check_modules_pkg_name}_LIBDIR)
+
+ list(APPEND _pkg_check_modules_packages "${_pkg_check_modules_pkg_name}")
+
+ # create the final query which is of the format:
+ # * <pkg-name> > <version>
+ # * <pkg-name> >= <version>
+ # * <pkg-name> = <version>
+ # * <pkg-name> <= <version>
+ # * <pkg-name> < <version>
+ # * --exists <pkg-name>
+ list(APPEND _pkg_check_modules_exist_query --print-errors --short-errors)
+ if (_pkg_check_modules_pkg_op)
+ list(APPEND _pkg_check_modules_exist_query "${_pkg_check_modules_pkg_name} ${_pkg_check_modules_pkg_op} ${_pkg_check_modules_pkg_ver}")
+ else()
+ list(APPEND _pkg_check_modules_exist_query --exists)
+ list(APPEND _pkg_check_modules_exist_query "${_pkg_check_modules_pkg_name}")
+ endif()
+
+ # execute the query
+ execute_process(
+ COMMAND ${PKG_CONFIG_EXECUTABLE} ${_pkg_check_modules_exist_query}
+ RESULT_VARIABLE _pkgconfig_retval
+ ERROR_VARIABLE _pkgconfig_error
+ ERROR_STRIP_TRAILING_WHITESPACE)
+
+ # evaluate the result and report failures
+ if (_pkgconfig_retval)
+ if(NOT ${_is_silent})
+ message(STATUS " ${_pkgconfig_error}")
+ endif()
+
+ set(_pkg_check_modules_failed 1)
+ endif()
+ endforeach()
+
+ if(_pkg_check_modules_failed)
+ # fail when requested
+ if (${_is_required})
+ message(FATAL_ERROR "A required package was not found")
+ endif ()
+ else()
+ # at this point we have verified that the requested modules
+ # exist; now go through them and set variables
+
+ _pkgconfig_set(${_prefix}_FOUND 1)
+ list(LENGTH _pkg_check_modules_packages pkg_count)
+
+ # iterate through all modules again and set individual variables
+ foreach (_pkg_check_modules_pkg ${_pkg_check_modules_packages})
+ # handle case when there is only one package required
+ if (pkg_count EQUAL 1)
+ set(_pkg_check_prefix "${_prefix}")
+ else()
+ set(_pkg_check_prefix "${_prefix}_${_pkg_check_modules_pkg}")
+ endif()
+
+ _pkgconfig_invoke(${_pkg_check_modules_pkg} "${_pkg_check_prefix}" VERSION "" --modversion )
+ pkg_get_variable("${_pkg_check_prefix}_PREFIX" ${_pkg_check_modules_pkg} "prefix")
+ pkg_get_variable("${_pkg_check_prefix}_INCLUDEDIR" ${_pkg_check_modules_pkg} "includedir")
+ pkg_get_variable("${_pkg_check_prefix}_LIBDIR" ${_pkg_check_modules_pkg} "libdir")
+ foreach (variable IN ITEMS PREFIX INCLUDEDIR LIBDIR)
+ _pkgconfig_set("${_pkg_check_prefix}_${variable}" "${${_pkg_check_prefix}_${variable}}")
+ endforeach ()
+ _pkgconfig_set("${_pkg_check_prefix}_MODULE_NAME" "${_pkg_check_modules_pkg}")
+
+ if (NOT ${_is_silent})
+ message(STATUS " Found ${_pkg_check_modules_pkg}, version ${_pkgconfig_VERSION}")
+ endif ()
+ endforeach()
+
+ # set variables which are combined for multiple modules
+ _pkgconfig_invoke_dyn("${_pkg_check_modules_packages}" "${_prefix}" LIBRARIES "(^| )-l" --libs-only-l )
+ _pkgconfig_invoke_dyn("${_pkg_check_modules_packages}" "${_prefix}" LIBRARY_DIRS "(^| )-L" --libs-only-L )
+ _pkgconfig_invoke_dyn("${_pkg_check_modules_packages}" "${_prefix}" LDFLAGS "" --libs )
+ _pkgconfig_invoke_dyn("${_pkg_check_modules_packages}" "${_prefix}" LDFLAGS_OTHER "" --libs-only-other )
+
+ if (APPLE AND "-framework" IN_LIST ${_prefix}_LDFLAGS_OTHER)
+ _pkgconfig_extract_frameworks("${_prefix}")
+ endif()
+
+ _pkgconfig_invoke_dyn("${_pkg_check_modules_packages}" "${_prefix}" INCLUDE_DIRS "(^| )(-I|-isystem ?)" --cflags-only-I )
+ _pkgconfig_invoke_dyn("${_pkg_check_modules_packages}" "${_prefix}" CFLAGS "" --cflags )
+ _pkgconfig_invoke_dyn("${_pkg_check_modules_packages}" "${_prefix}" CFLAGS_OTHER "" --cflags-only-other )
+
+ if (${_prefix}_CFLAGS_OTHER MATCHES "-isystem")
+ _pkgconfig_extract_isystem("${_prefix}")
+ endif ()
+
+ _pkg_recalculate("${_prefix}" ${_no_cmake_path} ${_no_cmake_environment_path} ${_imp_target} ${_imp_target_global})
+ endif()
+
+ _pkg_restore_path_internal()
+ else()
+ if (${_is_required})
+ message(SEND_ERROR "pkg-config tool not found")
+ endif ()
+ endif()
+endmacro()
+
+
+#[========================================[.rst:
+.. command:: pkg_check_modules
+
+ Checks for all the given modules, setting a variety of result variables in
+ the calling scope.
+
+ .. code-block:: cmake
+
+ pkg_check_modules(<prefix>
+ [REQUIRED] [QUIET]
+ [NO_CMAKE_PATH]
+ [NO_CMAKE_ENVIRONMENT_PATH]
+ [IMPORTED_TARGET [GLOBAL]]
+ <moduleSpec> [<moduleSpec>...])
+
+ When the ``REQUIRED`` argument is given, the command will fail with an error
+ if module(s) could not be found.
+
+ When the ``QUIET`` argument is given, no status messages will be printed.
+
+ By default, if :variable:`CMAKE_MINIMUM_REQUIRED_VERSION` is 3.1 or
+ later, or if :variable:`PKG_CONFIG_USE_CMAKE_PREFIX_PATH` is set to a
+ boolean ``True`` value, then the :variable:`CMAKE_PREFIX_PATH`,
+ :variable:`CMAKE_FRAMEWORK_PATH`, and :variable:`CMAKE_APPBUNDLE_PATH` cache
+ and environment variables will be added to the ``pkg-config`` search path.
+ The ``NO_CMAKE_PATH`` and ``NO_CMAKE_ENVIRONMENT_PATH`` arguments
+ disable this behavior for the cache variables and environment variables
+ respectively.
+
+ The ``IMPORTED_TARGET`` argument will create an imported target named
+ ``PkgConfig::<prefix>`` that can be passed directly as an argument to
+ :command:`target_link_libraries`. The ``GLOBAL`` argument will make the
+ imported target available in global scope.
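+
+ A minimal sketch of consuming such an imported target; the executable name
+ ``myapp`` is hypothetical:
+
+ .. code-block:: cmake
+
+   pkg_check_modules (GLIB2 REQUIRED IMPORTED_TARGET glib-2.0)
+   target_link_libraries (myapp PRIVATE PkgConfig::GLIB2)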
+
+ Each ``<moduleSpec>`` can be either a bare module name or it can be a
+ module name with a version constraint (operators ``=``, ``<``, ``>``,
+ ``<=`` and ``>=`` are supported). The following are examples for a module
+ named ``foo`` with various constraints:
+
+ - ``foo`` matches any version.
+ - ``foo<2`` only matches versions before 2.
+ - ``foo>=3.1`` matches any version from 3.1 or later.
+ - ``foo=1.2.3`` requires that foo must be exactly version 1.2.3.
+
+ The following variables may be set upon return. Two sets of values exist:
+ One for the common case (``<XXX> = <prefix>``) and another for the
+ information ``pkg-config`` provides when called with the ``--static``
+ option (``<XXX> = <prefix>_STATIC``).
+
+ ``<XXX>_FOUND``
+ set to 1 if module(s) exist
+ ``<XXX>_LIBRARIES``
+ only the libraries (without the '-l')
+ ``<XXX>_LINK_LIBRARIES``
+ the libraries and their absolute paths
+ ``<XXX>_LIBRARY_DIRS``
+ the paths of the libraries (without the '-L')
+ ``<XXX>_LDFLAGS``
+ all required linker flags
+ ``<XXX>_LDFLAGS_OTHER``
+ all other linker flags
+ ``<XXX>_INCLUDE_DIRS``
+ the '-I' preprocessor flags (without the '-I')
+ ``<XXX>_CFLAGS``
+ all required cflags
+ ``<XXX>_CFLAGS_OTHER``
+ the other compiler flags
+
+ All but ``<XXX>_FOUND`` may be a :ref:`;-list <CMake Language Lists>` if the
+ associated variable returned from ``pkg-config`` has multiple values.
+
+ There are some special variables whose prefix depends on the number of
+ ``<moduleSpec>`` given. When there is only one ``<moduleSpec>``,
+ ``<YYY>`` will simply be ``<prefix>``, but if two or more ``<moduleSpec>``
+ items are given, ``<YYY>`` will be ``<prefix>_<moduleName>``.
+
+ ``<YYY>_VERSION``
+ version of the module
+ ``<YYY>_PREFIX``
+ prefix directory of the module
+ ``<YYY>_INCLUDEDIR``
+ include directory of the module
+ ``<YYY>_LIBDIR``
+ lib directory of the module
+
+ Examples:
+
+ .. code-block:: cmake
+
+ pkg_check_modules (GLIB2 glib-2.0)
+
+ Looks for any version of glib2. If found, the output variable
+ ``GLIB2_VERSION`` will hold the actual version found.
+
+ .. code-block:: cmake
+
+ pkg_check_modules (GLIB2 glib-2.0>=2.10)
+
+ Looks for at least version 2.10 of glib2. If found, the output variable
+ ``GLIB2_VERSION`` will hold the actual version found.
+
+ .. code-block:: cmake
+
+ pkg_check_modules (FOO glib-2.0>=2.10 gtk+-2.0)
+
+ Looks for both glib-2.0 (at least version 2.10) and any version of
+ gtk+-2.0. Only if both are found will ``FOO`` be considered found.
+ The ``FOO_glib-2.0_VERSION`` and ``FOO_gtk+-2.0_VERSION`` variables will be
+ set to their respective found module versions.
+
+ .. code-block:: cmake
+
+ pkg_check_modules (XRENDER REQUIRED xrender)
+
+ Requires any version of ``xrender``. Example output variables set by a
+ successful call::
+
+ XRENDER_LIBRARIES=Xrender;X11
+ XRENDER_STATIC_LIBRARIES=Xrender;X11;pthread;Xau;Xdmcp
+#]========================================]
+macro(pkg_check_modules _prefix _module0)
+ _pkgconfig_parse_options(_pkg_modules _pkg_is_required _pkg_is_silent _no_cmake_path _no_cmake_environment_path _imp_target _imp_target_global "${_module0}" ${ARGN})
+ # check cached value
+ if (NOT DEFINED __pkg_config_checked_${_prefix} OR __pkg_config_checked_${_prefix} LESS ${PKG_CONFIG_VERSION} OR NOT ${_prefix}_FOUND OR
+ (NOT "${ARGN}" STREQUAL "" AND NOT "${__pkg_config_arguments_${_prefix}}" STREQUAL "${_module0};${ARGN}") OR
+ ( "${ARGN}" STREQUAL "" AND NOT "${__pkg_config_arguments_${_prefix}}" STREQUAL "${_module0}"))
+ _pkg_check_modules_internal("${_pkg_is_required}" "${_pkg_is_silent}" ${_no_cmake_path} ${_no_cmake_environment_path} ${_imp_target} ${_imp_target_global} "${_prefix}" ${_pkg_modules})
+
+ _pkgconfig_set(__pkg_config_checked_${_prefix} ${PKG_CONFIG_VERSION})
+ if (${_prefix}_FOUND)
+ _pkgconfig_set(__pkg_config_arguments_${_prefix} "${_module0};${ARGN}")
+ endif()
+ else()
+ if (${_prefix}_FOUND)
+ _pkg_recalculate("${_prefix}" ${_no_cmake_path} ${_no_cmake_environment_path} ${_imp_target} ${_imp_target_global})
+ endif()
+ endif()
+endmacro()
+
+
+#[========================================[.rst:
+.. command:: pkg_search_module
+
+ The behavior of this command is the same as :command:`pkg_check_modules`,
+ except that rather than checking for all the specified modules, it searches
+ for just the first successful match.
+
+ .. code-block:: cmake
+
+ pkg_search_module(<prefix>
+ [REQUIRED] [QUIET]
+ [NO_CMAKE_PATH]
+ [NO_CMAKE_ENVIRONMENT_PATH]
+ [IMPORTED_TARGET [GLOBAL]]
+ <moduleSpec> [<moduleSpec>...])
+
+ If a module is found, the ``<prefix>_MODULE_NAME`` variable will contain the
+ name of the matching module. This variable can be used if you need to run
+ :command:`pkg_get_variable`.
+
+ Example:
+
+ .. code-block:: cmake
+
+ pkg_search_module (BAR libxml-2.0 libxml2 libxml>=2)
+#]========================================]
+macro(pkg_search_module _prefix _module0)
+ _pkgconfig_parse_options(_pkg_modules_alt _pkg_is_required _pkg_is_silent _no_cmake_path _no_cmake_environment_path _imp_target _imp_target_global "${_module0}" ${ARGN})
+ # check cached value
+ if (NOT DEFINED __pkg_config_checked_${_prefix} OR __pkg_config_checked_${_prefix} LESS ${PKG_CONFIG_VERSION} OR NOT ${_prefix}_FOUND)
+ set(_pkg_modules_found 0)
+
+ if (NOT ${_pkg_is_silent})
+ message(STATUS "Checking for one of the modules '${_pkg_modules_alt}'")
+ endif ()
+
+ # iterate through all modules and stop at the first working one.
+ foreach(_pkg_alt ${_pkg_modules_alt})
+ if(NOT _pkg_modules_found)
+ _pkg_check_modules_internal(0 1 ${_no_cmake_path} ${_no_cmake_environment_path} ${_imp_target} ${_imp_target_global} "${_prefix}" "${_pkg_alt}")
+ endif()
+
+ if (${_prefix}_FOUND)
+ set(_pkg_modules_found 1)
+ break()
+ endif()
+ endforeach()
+
+ if (NOT ${_prefix}_FOUND)
+ if(${_pkg_is_required})
+ message(SEND_ERROR "None of the required '${_pkg_modules_alt}' found")
+ endif()
+ endif()
+
+ _pkgconfig_set(__pkg_config_checked_${_prefix} ${PKG_CONFIG_VERSION})
+ elseif (${_prefix}_FOUND)
+ _pkg_recalculate("${_prefix}" ${_no_cmake_path} ${_no_cmake_environment_path} ${_imp_target} ${_imp_target_global})
+ endif()
+endmacro()
+
+#[========================================[.rst:
+.. command:: pkg_get_variable
+
+ Retrieves the value of a pkg-config variable ``varName`` and stores it in the
+ result variable ``resultVar`` in the calling scope.
+
+ .. code-block:: cmake
+
+ pkg_get_variable(<resultVar> <moduleName> <varName>)
+
+ If ``pkg-config`` returns multiple values for the specified variable,
+ ``resultVar`` will contain a :ref:`;-list <CMake Language Lists>`.
+
+ For example:
+
+ .. code-block:: cmake
+
+ pkg_get_variable(GI_GIRDIR gobject-introspection-1.0 girdir)
+#]========================================]
+function (pkg_get_variable result pkg variable)
+ _pkg_set_path_internal()
+ _pkgconfig_invoke("${pkg}" "prefix" "result" "" "--variable=${variable}")
+ set("${result}"
+ "${prefix_result}"
+ PARENT_SCOPE)
+ _pkg_restore_path_internal()
+endfunction ()
+
+
+#[========================================[.rst:
+Variables Affecting Behavior
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. variable:: PKG_CONFIG_EXECUTABLE
+
+ This can be set to the path of the pkg-config executable. If not provided,
+ it will be set by the module as a result of calling :command:`find_program`
+ internally. The ``PKG_CONFIG`` environment variable can be used as a hint.
+
+.. variable:: PKG_CONFIG_USE_CMAKE_PREFIX_PATH
+
+ Specifies whether :command:`pkg_check_modules` and
+ :command:`pkg_search_module` should add the paths in the
+ :variable:`CMAKE_PREFIX_PATH`, :variable:`CMAKE_FRAMEWORK_PATH` and
+ :variable:`CMAKE_APPBUNDLE_PATH` cache and environment variables to the
+ ``pkg-config`` search path.
+
+ If this variable is not set, this behavior is enabled by default if
+ :variable:`CMAKE_MINIMUM_REQUIRED_VERSION` is 3.1 or later, disabled
+ otherwise.
+#]========================================]
+
+
+### Local Variables:
+### mode: cmake
+### End:
+
+cmake_policy(POP)
--- /dev/null
+install(FILES
+ FindPython3.cmake
+ Support.cmake
+ DESTINATION ${DUNE_INSTALL_MODULEDIR}/FindPython3)
--- /dev/null
+# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+# file Copyright.txt or https://cmake.org/licensing for details.
+
+#[=======================================================================[.rst:
+FindPython3
+-----------
+
+.. versionadded:: 3.12
+
+Find Python 3 interpreter, compiler and development environment (include
+directories and libraries).
+
+When a version is requested, it can be specified as a simple value or as a
+range. For a detailed description of version range usage and capabilities,
+refer to the :command:`find_package` command.
+
+The following components are supported:
+
+* ``Interpreter``: search for Python 3 interpreter
+* ``Compiler``: search for Python 3 compiler. Only offered by IronPython.
+* ``Development``: search for development artifacts (include directories and
+ libraries). This component includes two sub-components which can be specified
+ independently:
+
+ * ``Development.Module``: search for artifacts for Python 3 module
+ development.
+ * ``Development.Embed``: search for artifacts for Python 3 embedding
+ development.
+
+* ``NumPy``: search for NumPy include directories.
+
+If no ``COMPONENTS`` are specified, ``Interpreter`` is assumed.
+
+If component ``Development`` is specified, it implies sub-components
+``Development.Module`` and ``Development.Embed``.
+
+To ensure consistent versions between components ``Interpreter``, ``Compiler``,
+``Development`` (or one of its sub-components) and ``NumPy``, specify all
+components at the same time::
+
+ find_package (Python3 COMPONENTS Interpreter Development)
+
+This module looks only for version 3 of Python. It can be used
+concurrently with the :module:`FindPython2` module to use both Python versions.
+
+The :module:`FindPython` module can be used if the Python version does not
+matter for you.
+
+.. note::
+
+ If components ``Interpreter`` and ``Development`` (or one of its
+ sub-components) are both specified, this module search only for interpreter
+ with same platform architecture as the one defined by ``CMake``
+ configuration. This constraint does not apply if only ``Interpreter``
+ component is specified.
+
+Imported Targets
+^^^^^^^^^^^^^^^^
+
+This module defines the following :ref:`Imported Targets <Imported Targets>`
+(when :prop_gbl:`CMAKE_ROLE` is ``PROJECT``):
+
+``Python3::Interpreter``
+ Python 3 interpreter. Target defined if component ``Interpreter`` is found.
+``Python3::Compiler``
+ Python 3 compiler. Target defined if component ``Compiler`` is found.
+``Python3::Module``
+ Python 3 library for Python module. Target defined if component
+ ``Development.Module`` is found.
+``Python3::Python``
+ Python 3 library for Python embedding. Target defined if component
+ ``Development.Embed`` is found.
+``Python3::NumPy``
+ NumPy library for Python 3. Target defined if component ``NumPy`` is found.
+
+Result Variables
+^^^^^^^^^^^^^^^^
+
+This module will set the following variables in your project
+(see :ref:`Standard Variable Names <CMake Developer Standard Variable Names>`):
+
+``Python3_FOUND``
+ System has the requested Python 3 components.
+``Python3_Interpreter_FOUND``
+ System has the Python 3 interpreter.
+``Python3_EXECUTABLE``
+ Path to the Python 3 interpreter.
+``Python3_INTERPRETER_ID``
+ A short string unique to the interpreter. Possible values include:
+ * Python
+ * ActivePython
+ * Anaconda
+ * Canopy
+ * IronPython
+ * PyPy
+``Python3_STDLIB``
+ Standard platform independent installation directory.
+
+ Information returned by
+ ``distutils.sysconfig.get_python_lib(plat_specific=False,standard_lib=True)``
+ or else ``sysconfig.get_path('stdlib')``.
+``Python3_STDARCH``
+ Standard platform dependent installation directory.
+
+ Information returned by
+ ``distutils.sysconfig.get_python_lib(plat_specific=True,standard_lib=True)``
+ or else ``sysconfig.get_path('platstdlib')``.
+``Python3_SITELIB``
+ Third-party platform independent installation directory.
+
+ Information returned by
+ ``distutils.sysconfig.get_python_lib(plat_specific=False,standard_lib=False)``
+ or else ``sysconfig.get_path('purelib')``.
+``Python3_SITEARCH``
+ Third-party platform dependent installation directory.
+
+ Information returned by
+ ``distutils.sysconfig.get_python_lib(plat_specific=True,standard_lib=False)``
+ or else ``sysconfig.get_path('platlib')``.
+``Python3_SOABI``
+ Extension suffix for modules.
+
+ Information returned by
+ ``distutils.sysconfig.get_config_var('SOABI')`` or computed from
+ ``distutils.sysconfig.get_config_var('EXT_SUFFIX')`` or
+ ``python3-config --extension-suffix``. If package ``distutils.sysconfig`` is
+ not available, ``sysconfig.get_config_var('SOABI')`` or
+ ``sysconfig.get_config_var('EXT_SUFFIX')`` are used.
+``Python3_Compiler_FOUND``
+ System has the Python 3 compiler.
+``Python3_COMPILER``
+ Path to the Python 3 compiler. Only offered by IronPython.
+``Python3_COMPILER_ID``
+ A short string unique to the compiler. Possible values include:
+ * IronPython
+``Python3_DOTNET_LAUNCHER``
+ The ``.Net`` interpreter. Only used by the ``IronPython`` implementation.
+``Python3_Development_FOUND``
+ System has the Python 3 development artifacts.
+``Python3_Development.Module_FOUND``
+ System has the Python 3 development artifacts for Python module.
+``Python3_Development.Embed_FOUND``
+ System has the Python 3 development artifacts for Python embedding.
+``Python3_INCLUDE_DIRS``
+ The Python 3 include directories.
+``Python3_LINK_OPTIONS``
+ The Python 3 link options. Some configurations require specific link options
+ for a correct build and execution.
+``Python3_LIBRARIES``
+ The Python 3 libraries.
+``Python3_LIBRARY_DIRS``
+ The Python 3 library directories.
+``Python3_RUNTIME_LIBRARY_DIRS``
+ The Python 3 runtime library directories.
+``Python3_VERSION``
+ Python 3 version.
+``Python3_VERSION_MAJOR``
+ Python 3 major version.
+``Python3_VERSION_MINOR``
+ Python 3 minor version.
+``Python3_VERSION_PATCH``
+ Python 3 patch version.
+``Python3_PyPy_VERSION``
+ Python 3 PyPy version.
+``Python3_NumPy_FOUND``
+ System has NumPy.
+``Python3_NumPy_INCLUDE_DIRS``
+ The NumPy include directories.
+``Python3_NumPy_VERSION``
+ The NumPy version.
+
+Hints
+^^^^^
+
+``Python3_ROOT_DIR``
+ Define the root directory of a Python 3 installation.
+
+``Python3_USE_STATIC_LIBS``
+ * If not defined, search for shared libraries and static libraries in that
+ order.
+ * If set to TRUE, search **only** for static libraries.
+ * If set to FALSE, search **only** for shared libraries.
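+
+ For example, a minimal sketch forcing the use of shared libraries::
+
+   set (Python3_USE_STATIC_LIBS FALSE)
+   find_package (Python3 COMPONENTS Development)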
+
+``Python3_FIND_ABI``
+ This variable defines which ABIs, as defined in
+ `PEP 3149 <https://www.python.org/dev/peps/pep-3149/>`_, should be searched.
+
+ .. note::
+
+ If ``Python3_FIND_ABI`` is not defined, any ABI will be searched.
+
+ The ``Python3_FIND_ABI`` variable is a 3-tuple specifying, in that order,
+ ``pydebug`` (``d``), ``pymalloc`` (``m``) and ``unicode`` (``u``) flags.
+ Each element can be set to one of the following:
+
+ * ``ON``: Corresponding flag is selected.
+ * ``OFF``: Corresponding flag is not selected.
+ * ``ANY``: The two possibilities (``ON`` and ``OFF``) will be searched.
+
+ From this 3-tuple, various ABIs will be searched starting from the most
+ specialized to the most general. Moreover, ``debug`` versions will be
+ searched **after** ``non-debug`` ones.
+
+ For example, if we have::
+
+ set (Python3_FIND_ABI "ON" "ANY" "ANY")
+
+ The following flags combinations will be appended, in that order, to the
+ artifact names: ``dmu``, ``dm``, ``du``, and ``d``.
+
+ And to search any possible ABIs::
+
+ set (Python3_FIND_ABI "ANY" "ANY" "ANY")
+
+ The following combinations, in that order, will be used: ``mu``, ``m``,
+ ``u``, ``<empty>``, ``dmu``, ``dm``, ``du`` and ``d``.
+
+ .. note::
+
+ This hint is useful only on ``POSIX`` systems. So, on ``Windows`` systems,
+ when ``Python3_FIND_ABI`` is defined, ``Python`` distributions from
+ `python.org <https://www.python.org/>`_ will be found only if the value for
+ each flag is ``OFF`` or ``ANY``.
+
+``Python3_FIND_STRATEGY``
+ This variable defines how lookup will be done.
+ The ``Python3_FIND_STRATEGY`` variable can be set to one of the following:
+
+ * ``VERSION``: Try to find the most recent version in all specified
+ locations.
+ This is the default if policy :policy:`CMP0094` is undefined or set to
+ ``OLD``.
+ * ``LOCATION``: Stops lookup as soon as a version satisfying version
+ constraints is found.
+ This is the default if policy :policy:`CMP0094` is set to ``NEW``.
+
+``Python3_FIND_REGISTRY``
+ On Windows, the ``Python3_FIND_REGISTRY`` variable determines the order
+ of preference between registry and environment variables.
+ The ``Python3_FIND_REGISTRY`` variable can be set to one of the following:
+
+ * ``FIRST``: Try to use registry before environment variables.
+ This is the default.
+ * ``LAST``: Try to use registry after environment variables.
+ * ``NEVER``: Never try to use registry.
+
+``Python3_FIND_FRAMEWORK``
+ On macOS, the ``Python3_FIND_FRAMEWORK`` variable determines the order of
+ preference between Apple-style and unix-style package components.
+ This variable can take same values as :variable:`CMAKE_FIND_FRAMEWORK`
+ variable.
+
+ .. note::
+
+ Value ``ONLY`` is not supported so ``FIRST`` will be used instead.
+
+ If ``Python3_FIND_FRAMEWORK`` is not defined, :variable:`CMAKE_FIND_FRAMEWORK`
+ variable will be used, if any.
+
+``Python3_FIND_VIRTUALENV``
+ This variable defines the handling of virtual environments managed by
+ ``virtualenv`` or ``conda``. It is meaningful only when a virtual environment
+ is active (i.e. the ``activate`` script has been evaluated). In this case, it
+ takes precedence over ``Python3_FIND_REGISTRY`` and ``CMAKE_FIND_FRAMEWORK``
+ variables. The ``Python3_FIND_VIRTUALENV`` variable can be set to one of the
+ following:
+
+ * ``FIRST``: The virtual environment is used before any other standard
+ paths to look up the interpreter. This is the default.
+ * ``ONLY``: Only the virtual environment is used to look up the
+ interpreter.
+ * ``STANDARD``: The virtual environment is not used to look up the
+ interpreter, but the environment variable ``PATH`` is always considered.
+ In this case, the variable ``Python3_FIND_REGISTRY`` (Windows) or
+ ``CMAKE_FIND_FRAMEWORK`` (macOS) can be set to ``LAST`` or
+ ``NEVER`` to preferentially select the interpreter from the virtual
+ environment.
+
+ .. note::
+
+ If the component ``Development`` is requested, it is **strongly**
+ recommended to also include the component ``Interpreter`` to get the expected
+ result.
+
+``Python3_FIND_IMPLEMENTATIONS``
+ This variable defines, in an ordered list, the different implementations
+ which will be searched. The ``Python3_FIND_IMPLEMENTATIONS`` variable can
+ hold the following values:
+
+ * ``CPython``: this is the standard implementation. Various products, like
+ ``Anaconda`` or ``ActivePython``, rely on this implementation.
+ * ``IronPython``: This implementation uses the ``CSharp`` language for
+ ``.NET Framework`` on top of the `Dynamic Language Runtime` (``DLR``).
+ See `IronPython <http://ironpython.net>`_.
+ * ``PyPy``: This implementation uses the ``RPython`` language and the
+ ``RPython translation toolchain`` to produce the Python interpreter.
+ See `PyPy <https://www.pypy.org>`_.
+
+ The default value is:
+
+ * Windows platform: ``CPython``, ``IronPython``
+ * Other platforms: ``CPython``
+
+ .. note::
+
+ This hint has the lowest priority of all hints, so even if, for example,
+ you specify ``IronPython`` first and ``CPython`` second, a Python
+ product based on ``CPython`` can be selected because, for example with
+ ``Python3_FIND_STRATEGY=LOCATION``, each location will be searched first for
+ ``IronPython`` and second for ``CPython``.
+
+ .. note::
+
+ When ``IronPython`` is specified, on platforms other than ``Windows``, the
+ ``.Net`` interpreter (i.e. ``mono`` command) is expected to be available
+ through the ``PATH`` variable.
+
+``Python3_FIND_UNVERSIONED_NAMES``
+ .. versionadded:: 3.20
+
+ This variable defines how the generic names will be searched. Currently, it
+ only applies to the generic names of the interpreter, namely, ``python3`` and
+ ``python``.
+ The ``Python3_FIND_UNVERSIONED_NAMES`` variable can be set to one of the
+ following values:
+
+ * ``FIRST``: The generic names are searched before the more specialized ones
+ (such as ``python3.5`` for example).
+ * ``LAST``: The generic names are searched after the more specialized ones.
+ This is the default.
+ * ``NEVER``: The generic names are not searched at all.
+
+Artifacts Specification
+^^^^^^^^^^^^^^^^^^^^^^^
+
+To solve special cases, it is possible to specify the artifacts directly by
+setting the following variables:
+
+``Python3_EXECUTABLE``
+ The path to the interpreter.
+
+``Python3_COMPILER``
+ The path to the compiler.
+
+``Python3_DOTNET_LAUNCHER``
+ The ``.Net`` interpreter. Only used by the ``IronPython`` implementation.
+
+``Python3_LIBRARY``
+ The path to the library. It will be used to compute the
+ variables ``Python3_LIBRARIES``, ``Python3_LIBRARY_DIRS`` and
+ ``Python3_RUNTIME_LIBRARY_DIRS``.
+
+``Python3_INCLUDE_DIR``
+ The path to the directory of the ``Python`` headers. It will be used to
+ compute the variable ``Python3_INCLUDE_DIRS``.
+
+``Python3_NumPy_INCLUDE_DIR``
+ The path to the directory of the ``NumPy`` headers. It will be used to
+ compute the variable ``Python3_NumPy_INCLUDE_DIRS``.
+
+.. note::
+
+ All paths must be absolute. Any artifact specified with a relative path
+ will be ignored.
+
+.. note::
+
+ When an artifact is specified, all ``HINTS`` will be ignored and no search
+ will be performed for this artifact.
+
+ If more than one artifact is specified, it is the user's responsibility to
+ ensure the consistency of the various artifacts.
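+
+As a minimal sketch (the interpreter path shown is hypothetical), an artifact
+can be pinned when configuring the project::
+
+  cmake -DPython3_EXECUTABLE=/usr/bin/python3.9 <path-to-source>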
+
+By default, this module supports multiple calls in different directories of a
+project with different version/component requirements while providing correct
+and consistent results for each call. To support this behavior, the ``CMake``
+cache is not used in the traditional way, which can be problematic for
+interactive specification. So, to also enable interactive specification, the
+module behavior can be controlled with the following variable:
+
+``Python3_ARTIFACTS_INTERACTIVE``
+ Selects the behavior of the module. This is a boolean variable:
+
+ * If set to ``TRUE``: Create CMake cache entries for the above artifact
+ specification variables so that users can edit them interactively.
+ This disables support for multiple version/component requirements.
+ * If set to ``FALSE`` or undefined: Enable multiple version/component
+ requirements.
+
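+ For example, to expose the artifact variables as editable cache entries
+ (illustrative sketch; this is typically done before the first call to
+ ``find_package(Python3)``)::
+
+   set (Python3_ARTIFACTS_INTERACTIVE TRUE)
+   find_package (Python3 COMPONENTS Interpreter Development)
+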
+Commands
+^^^^^^^^
+
+This module defines the command ``Python3_add_library`` (when
+:prop_gbl:`CMAKE_ROLE` is ``PROJECT``), which has the same semantics as
+:command:`add_library` and adds a dependency to target ``Python3::Python`` or,
+when library type is ``MODULE``, to target ``Python3::Module`` and takes care
+of Python module naming rules::
+
+ Python3_add_library (<name> [STATIC | SHARED | MODULE [WITH_SOABI]]
+ <source1> [<source2> ...])
+
+If the library type is not specified, ``MODULE`` is assumed.
+
+For ``MODULE`` library type, if option ``WITH_SOABI`` is specified, the
+module suffix will include the ``Python3_SOABI`` value, if any.
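+
+For example, a Python extension module might be built as follows (a minimal
+sketch; ``my_module.c`` is a placeholder source file)::
+
+  find_package (Python3 COMPONENTS Interpreter Development.Module)
+  Python3_add_library (my_module MODULE WITH_SOABI my_module.c)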
+#]=======================================================================]
+
+
+set (_PYTHON_PREFIX Python3)
+
+set (_Python3_REQUIRED_VERSION_MAJOR 3)
+
+include (${CMAKE_CURRENT_LIST_DIR}/Support.cmake)
+
+if (COMMAND __Python3_add_library)
+ macro (Python3_add_library)
+ __Python3_add_library (Python3 ${ARGV})
+ endmacro()
+endif()
+
+unset (_PYTHON_PREFIX)
--- /dev/null
+# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+# file Copyright.txt or https://cmake.org/licensing for details.
+
+#
+# This file is a "template" file used by various FindPython modules.
+#
+
+#
+# Initial configuration
+#
+
+cmake_policy(PUSH)
+# numbers and boolean constants
+cmake_policy (SET CMP0012 NEW)
+# IN_LIST operator
+cmake_policy (SET CMP0057 NEW)
+
+if (NOT DEFINED _PYTHON_PREFIX)
+ message (FATAL_ERROR "FindPython: INTERNAL ERROR")
+endif()
+if (NOT DEFINED _${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR)
+ message (FATAL_ERROR "FindPython: INTERNAL ERROR")
+endif()
+if (_${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR EQUAL "3")
+ set(_${_PYTHON_PREFIX}_VERSIONS 3.10 3.9 3.8 3.7 3.6 3.5 3.4 3.3 3.2 3.1 3.0)
+elseif (_${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR EQUAL "2")
+ set(_${_PYTHON_PREFIX}_VERSIONS 2.7 2.6 2.5 2.4 2.3 2.2 2.1 2.0)
+else()
+ message (FATAL_ERROR "FindPython: INTERNAL ERROR")
+endif()
+
+get_property(_${_PYTHON_PREFIX}_CMAKE_ROLE GLOBAL PROPERTY CMAKE_ROLE)
+
+include (FindPackageHandleStandardArgs)
+
+#
+# helper commands
+#
+macro (_PYTHON_DISPLAY_FAILURE _PYTHON_MSG)
+ if (${_PYTHON_PREFIX}_FIND_REQUIRED)
+ message (FATAL_ERROR "${_PYTHON_MSG}")
+ else()
+ if (NOT ${_PYTHON_PREFIX}_FIND_QUIETLY)
+ message(STATUS "${_PYTHON_MSG}")
+ endif ()
+ endif()
+
+ set (${_PYTHON_PREFIX}_FOUND FALSE)
+ string (TOUPPER "${_PYTHON_PREFIX}" _${_PYTHON_PREFIX}_UPPER_PREFIX)
+ set (${_${_PYTHON_PREFIX}_UPPER_PREFIX}_FOUND FALSE)
+endmacro()
+
+
+function (_PYTHON_MARK_AS_INTERNAL)
+ foreach (var IN LISTS ARGV)
+ if (DEFINED CACHE{${var}})
+ set_property (CACHE ${var} PROPERTY TYPE INTERNAL)
+ endif()
+ endforeach()
+endfunction()
+
+
+macro (_PYTHON_SELECT_LIBRARY_CONFIGURATIONS _PYTHON_BASENAME)
+ if(NOT DEFINED ${_PYTHON_BASENAME}_LIBRARY_RELEASE)
+ set(${_PYTHON_BASENAME}_LIBRARY_RELEASE "${_PYTHON_BASENAME}_LIBRARY_RELEASE-NOTFOUND")
+ endif()
+ if(NOT DEFINED ${_PYTHON_BASENAME}_LIBRARY_DEBUG)
+ set(${_PYTHON_BASENAME}_LIBRARY_DEBUG "${_PYTHON_BASENAME}_LIBRARY_DEBUG-NOTFOUND")
+ endif()
+
+ get_property(_PYTHON_isMultiConfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
+ if (${_PYTHON_BASENAME}_LIBRARY_DEBUG AND ${_PYTHON_BASENAME}_LIBRARY_RELEASE AND
+ NOT ${_PYTHON_BASENAME}_LIBRARY_DEBUG STREQUAL ${_PYTHON_BASENAME}_LIBRARY_RELEASE AND
+ (_PYTHON_isMultiConfig OR CMAKE_BUILD_TYPE))
+ # if the generator is multi-config or if CMAKE_BUILD_TYPE is set for
+ # single-config generators, set optimized and debug libraries
+ set (${_PYTHON_BASENAME}_LIBRARIES "")
+ foreach (_PYTHON_libname IN LISTS ${_PYTHON_BASENAME}_LIBRARY_RELEASE)
+ list( APPEND ${_PYTHON_BASENAME}_LIBRARIES optimized "${_PYTHON_libname}")
+ endforeach()
+ foreach (_PYTHON_libname IN LISTS ${_PYTHON_BASENAME}_LIBRARY_DEBUG)
+ list( APPEND ${_PYTHON_BASENAME}_LIBRARIES debug "${_PYTHON_libname}")
+ endforeach()
+ elseif (${_PYTHON_BASENAME}_LIBRARY_RELEASE)
+ set (${_PYTHON_BASENAME}_LIBRARIES "${${_PYTHON_BASENAME}_LIBRARY_RELEASE}")
+ elseif (${_PYTHON_BASENAME}_LIBRARY_DEBUG)
+ set (${_PYTHON_BASENAME}_LIBRARIES "${${_PYTHON_BASENAME}_LIBRARY_DEBUG}")
+ else()
+ set (${_PYTHON_BASENAME}_LIBRARIES "${_PYTHON_BASENAME}_LIBRARY-NOTFOUND")
+ endif()
+endmacro()
+
+
+macro (_PYTHON_FIND_FRAMEWORKS)
+ if (CMAKE_HOST_APPLE OR APPLE)
+ file(TO_CMAKE_PATH "$ENV{CMAKE_FRAMEWORK_PATH}" _pff_CMAKE_FRAMEWORK_PATH)
+ set (_pff_frameworks ${CMAKE_FRAMEWORK_PATH}
+ ${_pff_CMAKE_FRAMEWORK_PATH}
+ ~/Library/Frameworks
+ /usr/local/Frameworks
+ ${CMAKE_SYSTEM_FRAMEWORK_PATH})
+ if (_pff_frameworks) # Behavior change in CMake 3.14
+ list (REMOVE_DUPLICATES _pff_frameworks)
+ endif ()
+ foreach (_pff_implementation IN LISTS _${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS)
+ unset (_${_PYTHON_PREFIX}_${_pff_implementation}_FRAMEWORKS)
+ if (_pff_implementation STREQUAL "CPython")
+ foreach (_pff_framework IN LISTS _pff_frameworks)
+ if (EXISTS ${_pff_framework}/Python${_${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR}.framework)
+ list (APPEND _${_PYTHON_PREFIX}_${_pff_implementation}_FRAMEWORKS ${_pff_framework}/Python${_${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR}.framework)
+ endif()
+ if (EXISTS ${_pff_framework}/Python.framework)
+ list (APPEND _${_PYTHON_PREFIX}_${_pff_implementation}_FRAMEWORKS ${_pff_framework}/Python.framework)
+ endif()
+ endforeach()
+ elseif (_pff_implementation STREQUAL "IronPython")
+ foreach (_pff_framework IN LISTS _pff_frameworks)
+ if (EXISTS ${_pff_framework}/IronPython.framework)
+ list (APPEND _${_PYTHON_PREFIX}_${_pff_implementation}_FRAMEWORKS ${_pff_framework}/IronPython.framework)
+ endif()
+ endforeach()
+ endif()
+ endforeach()
+ unset (_pff_implementation)
+ unset (_pff_frameworks)
+ unset (_pff_framework)
+ endif()
+endmacro()
+
+function (_PYTHON_GET_FRAMEWORKS _PYTHON_PGF_FRAMEWORK_PATHS)
+ cmake_parse_arguments (PARSE_ARGV 1 _PGF "" "" "IMPLEMENTATIONS;VERSION")
+
+ if (NOT _PGF_IMPLEMENTATIONS)
+ set (_PGF_IMPLEMENTATIONS ${_${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS})
+ endif()
+
+ set (framework_paths)
+
+ foreach (implementation IN LISTS _PGF_IMPLEMENTATIONS)
+ if (implementation STREQUAL "CPython")
+ foreach (version IN LISTS _PGF_VERSION)
+ foreach (framework IN LISTS _${_PYTHON_PREFIX}_${implementation}_FRAMEWORKS)
+ if (EXISTS "${framework}/Versions/${version}")
+ list (APPEND framework_paths "${framework}/Versions/${version}")
+ endif()
+ endforeach()
+ endforeach()
+ elseif (implementation STREQUAL "IronPython")
+ foreach (version IN LISTS _PGF_VERSION)
+ foreach (framework IN LISTS _${_PYTHON_PREFIX}_${implementation}_FRAMEWORKS)
+ # pick-up all available versions
+ file (GLOB versions LIST_DIRECTORIES true RELATIVE "${framework}/Versions/"
+ "${framework}/Versions/${version}*")
+ list (SORT versions ORDER DESCENDING)
+ list (TRANSFORM versions PREPEND "${framework}/Versions/")
+ list (APPEND framework_paths ${versions})
+ endforeach()
+ endforeach()
+ endif()
+ endforeach()
+
+ set (${_PYTHON_PGF_FRAMEWORK_PATHS} ${framework_paths} PARENT_SCOPE)
+endfunction()
+
+function (_PYTHON_GET_REGISTRIES _PYTHON_PGR_REGISTRY_PATHS)
+ cmake_parse_arguments (PARSE_ARGV 1 _PGR "" "" "IMPLEMENTATIONS;VERSION")
+
+ if (NOT _PGR_IMPLEMENTATIONS)
+ set (_PGR_IMPLEMENTATIONS ${_${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS})
+ endif()
+
+ set (registries)
+
+ foreach (implementation IN LISTS _PGR_IMPLEMENTATIONS)
+ if (implementation STREQUAL "CPython")
+ foreach (version IN LISTS _PGR_VERSION)
+ string (REPLACE "." "" version_no_dots ${version})
+ list (APPEND registries
+ [HKEY_CURRENT_USER\\SOFTWARE\\Python\\PythonCore\\${version}-${_${_PYTHON_PREFIX}_ARCH}\\InstallPath]
+ [HKEY_CURRENT_USER\\SOFTWARE\\Python\\PythonCore\\${version}-${_${_PYTHON_PREFIX}_ARCH2}\\InstallPath])
+ if (version VERSION_GREATER_EQUAL "3.5")
+ get_filename_component (arch "[HKEY_CURRENT_USER\\Software\\Python\\PythonCore\\${version};SysArchitecture]" NAME)
+ if (arch MATCHES "(${_${_PYTHON_PREFIX}_ARCH}|${_${_PYTHON_PREFIX}_ARCH2})bit")
+ list (APPEND registries
+ [HKEY_CURRENT_USER\\SOFTWARE\\Python\\PythonCore\\${version}\\InstallPath])
+ endif()
+ else()
+ list (APPEND registries
+ [HKEY_CURRENT_USER\\SOFTWARE\\Python\\PythonCore\\${version}\\InstallPath])
+ endif()
+ list (APPEND registries
+ [HKEY_CURRENT_USER\\SOFTWARE\\Python\\ContinuumAnalytics\\Anaconda${version_no_dots}-${_${_PYTHON_PREFIX}_ARCH}\\InstallPath]
+ [HKEY_CURRENT_USER\\SOFTWARE\\Python\\ContinuumAnalytics\\Anaconda${version_no_dots}-${_${_PYTHON_PREFIX}_ARCH2}\\InstallPath]
+ [HKEY_LOCAL_MACHINE\\SOFTWARE\\Python\\PythonCore\\${version}-${_${_PYTHON_PREFIX}_ARCH}\\InstallPath]
+ [HKEY_LOCAL_MACHINE\\SOFTWARE\\Python\\PythonCore\\${version}-${_${_PYTHON_PREFIX}_ARCH2}\\InstallPath]
+ [HKEY_LOCAL_MACHINE\\SOFTWARE\\Python\\PythonCore\\${version}\\InstallPath]
+ [HKEY_LOCAL_MACHINE\\SOFTWARE\\Python\\ContinuumAnalytics\\Anaconda${version_no_dots}-${_${_PYTHON_PREFIX}_ARCH}\\InstallPath]
+ [HKEY_LOCAL_MACHINE\\SOFTWARE\\Python\\ContinuumAnalytics\\Anaconda${version_no_dots}-${_${_PYTHON_PREFIX}_ARCH2}\\InstallPath])
+ endforeach()
+ elseif (implementation STREQUAL "IronPython")
+ foreach (version IN LISTS _PGR_VERSION)
+ list (APPEND registries [HKEY_LOCAL_MACHINE\\SOFTWARE\\IronPython\\${version}\\InstallPath])
+ endforeach()
+ endif()
+ endforeach()
+
+ set (${_PYTHON_PGR_REGISTRY_PATHS} "${registries}" PARENT_SCOPE)
+endfunction()
+
+
+function (_PYTHON_GET_ABIFLAGS _PGABIFLAGS)
+ set (abiflags)
+ list (GET _${_PYTHON_PREFIX}_FIND_ABI 0 pydebug)
+ list (GET _${_PYTHON_PREFIX}_FIND_ABI 1 pymalloc)
+ list (GET _${_PYTHON_PREFIX}_FIND_ABI 2 unicode)
+
+ if (pymalloc STREQUAL "ANY" AND unicode STREQUAL "ANY")
+ set (abiflags "mu" "m" "u" "")
+ elseif (pymalloc STREQUAL "ANY" AND unicode STREQUAL "ON")
+ set (abiflags "mu" "u")
+ elseif (pymalloc STREQUAL "ANY" AND unicode STREQUAL "OFF")
+ set (abiflags "m" "")
+ elseif (pymalloc STREQUAL "ON" AND unicode STREQUAL "ANY")
+ set (abiflags "mu" "m")
+ elseif (pymalloc STREQUAL "ON" AND unicode STREQUAL "ON")
+ set (abiflags "mu")
+ elseif (pymalloc STREQUAL "ON" AND unicode STREQUAL "OFF")
+ set (abiflags "m")
+ elseif (pymalloc STREQUAL "ON" AND unicode STREQUAL "ANY")
+ set (abiflags "u" "")
+ elseif (pymalloc STREQUAL "OFF" AND unicode STREQUAL "ON")
+ set (abiflags "u")
+ endif()
+
+ if (pydebug STREQUAL "ON")
+ if (abiflags)
+ list (TRANSFORM abiflags PREPEND "d")
+ else()
+ set (abiflags "d")
+ endif()
+ elseif (pydebug STREQUAL "ANY")
+ if (abiflags)
+ set (flags "${abiflags}")
+ list (TRANSFORM flags PREPEND "d")
+ list (APPEND abiflags "${flags}")
+ else()
+ set (abiflags "" "d")
+ endif()
+ endif()
+
+ set (${_PGABIFLAGS} "${abiflags}" PARENT_SCOPE)
+endfunction()
+
+function (_PYTHON_GET_PATH_SUFFIXES _PYTHON_PGPS_PATH_SUFFIXES)
+ cmake_parse_arguments (PARSE_ARGV 1 _PGPS "INTERPRETER;COMPILER;LIBRARY;INCLUDE" "" "IMPLEMENTATIONS;VERSION")
+
+ if (NOT _PGPS_IMPLEMENTATIONS)
+ set (_PGPS_IMPLEMENTATIONS ${_${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS})
+ endif()
+
+ if (DEFINED _${_PYTHON_PREFIX}_ABIFLAGS)
+ set (abi "${_${_PYTHON_PREFIX}_ABIFLAGS}")
+ else()
+ set (abi "mu" "m" "u" "")
+ endif()
+
+ set (path_suffixes)
+
+ foreach (implementation IN LISTS _PGPS_IMPLEMENTATIONS)
+ if (implementation STREQUAL "CPython")
+ if (_PGPS_INTERPRETER)
+ list (APPEND path_suffixes bin Scripts)
+ else()
+ foreach (version IN LISTS _PGPS_VERSION)
+ if (_PGPS_LIBRARY)
+ if (CMAKE_LIBRARY_ARCHITECTURE)
+ list (APPEND path_suffixes lib/${CMAKE_LIBRARY_ARCHITECTURE})
+ endif()
+ list (APPEND path_suffixes lib libs)
+
+ if (CMAKE_LIBRARY_ARCHITECTURE)
+ set (suffixes "${abi}")
+ if (suffixes)
+ list (TRANSFORM suffixes PREPEND "lib/python${_PGPS_VERSION}/config-${_PGPS_VERSION}")
+ list (TRANSFORM suffixes APPEND "-${CMAKE_LIBRARY_ARCHITECTURE}")
+ else()
+ set (suffixes "lib/python${_PGPS_VERSION}/config-${_PGPS_VERSION}-${CMAKE_LIBRARY_ARCHITECTURE}")
+ endif()
+ list (APPEND path_suffixes ${suffixes})
+ endif()
+ set (suffixes "${abi}")
+ if (suffixes)
+ list (TRANSFORM suffixes PREPEND "lib/python${_PGPS_VERSION}/config-${_PGPS_VERSION}")
+ else()
+ set (suffixes "lib/python${_PGPS_VERSION}/config-${_PGPS_VERSION}")
+ endif()
+ list (APPEND path_suffixes ${suffixes})
+ elseif (_PGPS_INCLUDE)
+ set (suffixes "${abi}")
+ if (suffixes)
+ list (TRANSFORM suffixes PREPEND "include/python${_PGPS_VERSION}")
+ else()
+ set (suffixes "include/python${_PGPS_VERSION}")
+ endif()
+ list (APPEND path_suffixes ${suffixes} include)
+ endif()
+ endforeach()
+ endif()
+ elseif (implementation STREQUAL "IronPython")
+ if (_PGPS_INTERPRETER OR _PGPS_COMPILER)
+ foreach (version IN LISTS _PGPS_VERSION)
+ list (APPEND path_suffixes "share/ironpython${version}")
+ endforeach()
+ list (APPEND path_suffixes ${_${_PYTHON_PREFIX}_IRON_PYTHON_PATH_SUFFIXES})
+ endif()
+ elseif (implementation STREQUAL "PyPy")
+ if (_PGPS_INTERPRETER)
+ list (APPEND path_suffixes ${_${_PYTHON_PREFIX}_PYPY_EXECUTABLE_PATH_SUFFIXES})
+ elseif (_PGPS_LIBRARY)
+ list (APPEND path_suffixes ${_${_PYTHON_PREFIX}_PYPY_LIBRARY_PATH_SUFFIXES})
+ elseif (_PGPS_INCLUDE)
+ list (APPEND path_suffixes ${_${_PYTHON_PREFIX}_PYPY_INCLUDE_PATH_SUFFIXES})
+ endif()
+ endif()
+ endforeach()
+ if (path_suffixes) # Behavior change in CMake 3.14
+ list (REMOVE_DUPLICATES path_suffixes)
+ endif ()
+
+ set (${_PYTHON_PGPS_PATH_SUFFIXES} ${path_suffixes} PARENT_SCOPE)
+endfunction()
+
+function (_PYTHON_GET_NAMES _PYTHON_PGN_NAMES)
+ cmake_parse_arguments (PARSE_ARGV 1 _PGN "POSIX;INTERPRETER;COMPILER;CONFIG;LIBRARY;WIN32;DEBUG" "" "IMPLEMENTATIONS;VERSION")
+
+ if (NOT _PGN_IMPLEMENTATIONS)
+ set (_PGN_IMPLEMENTATIONS ${_${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS})
+ endif()
+
+ set (names)
+
+ foreach (implementation IN LISTS _PGN_IMPLEMENTATIONS)
+ if (implementation STREQUAL "CPython")
+ if (_PGN_INTERPRETER AND _${_PYTHON_PREFIX}_FIND_UNVERSIONED_NAMES STREQUAL "FIRST")
+ list (APPEND names python${_${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR} python)
+ endif()
+ foreach (version IN LISTS _PGN_VERSION)
+ if (_PGN_WIN32)
+ string (REPLACE "." "" version_no_dots ${version})
+
+ set (name python${version_no_dots})
+ if (_PGN_DEBUG)
+ string (APPEND name "_d")
+ endif()
+
+ list (APPEND names "${name}")
+ endif()
+
+ if (_PGN_POSIX)
+ if (DEFINED _${_PYTHON_PREFIX}_ABIFLAGS)
+ set (abi "${_${_PYTHON_PREFIX}_ABIFLAGS}")
+ else()
+ if (_PGN_INTERPRETER OR _PGN_CONFIG)
+ set (abi "")
+ else()
+ set (abi "mu" "m" "u" "")
+ endif()
+ endif()
+
+ if (abi)
+ if (_PGN_CONFIG AND DEFINED CMAKE_LIBRARY_ARCHITECTURE)
+ set (abinames "${abi}")
+ list (TRANSFORM abinames PREPEND "${CMAKE_LIBRARY_ARCHITECTURE}-python${version}")
+ list (TRANSFORM abinames APPEND "-config")
+ list (APPEND names ${abinames})
+ endif()
+ set (abinames "${abi}")
+ list (TRANSFORM abinames PREPEND "python${version}")
+ if (_PGN_CONFIG)
+ list (TRANSFORM abinames APPEND "-config")
+ endif()
+ list (APPEND names ${abinames})
+ else()
+ unset (abinames)
+ if (_PGN_CONFIG AND DEFINED CMAKE_LIBRARY_ARCHITECTURE)
+ set (abinames "${CMAKE_LIBRARY_ARCHITECTURE}-python${version}")
+ endif()
+ list (APPEND abinames "python${version}")
+ if (_PGN_CONFIG)
+ list (TRANSFORM abinames APPEND "-config")
+ endif()
+ list (APPEND names ${abinames})
+ endif()
+ endif()
+ endforeach()
+ if (_PGN_INTERPRETER AND _${_PYTHON_PREFIX}_FIND_UNVERSIONED_NAMES STREQUAL "LAST")
+ list (APPEND names python${_${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR} python)
+ endif()
+ elseif (implementation STREQUAL "IronPython")
+ if (_PGN_INTERPRETER)
+ if (NOT CMAKE_SYSTEM_NAME STREQUAL "Linux")
+ # Do not use wrapper script on Linux because it is buggy: -c interpreter option cannot be used
+ foreach (version IN LISTS _PGN_VERSION)
+ list (APPEND names "ipy${version}")
+ endforeach()
+ endif()
+ list (APPEND names ${_${_PYTHON_PREFIX}_IRON_PYTHON_INTERPRETER_NAMES})
+ elseif (_PGN_COMPILER)
+ list (APPEND names ${_${_PYTHON_PREFIX}_IRON_PYTHON_COMPILER_NAMES})
+ endif()
+ elseif (implementation STREQUAL "PyPy")
+ if (_PGN_INTERPRETER)
+ list (APPEND names ${_${_PYTHON_PREFIX}_PYPY_NAMES})
+ elseif (_PGN_LIBRARY)
+ if (_PGN_WIN32)
+ foreach (version IN LISTS _PGN_VERSION)
+ string (REPLACE "." "" version_no_dots ${version})
+
+ set (name "python${version_no_dots}")
+ if (_PGN_DEBUG)
+ string (APPEND name "_d")
+ endif()
+ list (APPEND names "${name}")
+ endforeach()
+ endif()
+ list (APPEND names ${_${_PYTHON_PREFIX}_PYPY_LIB_NAMES})
+ endif()
+ endif()
+ endforeach()
+
+ set (${_PYTHON_PGN_NAMES} ${names} PARENT_SCOPE)
+endfunction()
+
+function (_PYTHON_GET_CONFIG_VAR _PYTHON_PGCV_VALUE NAME)
+ unset (${_PYTHON_PGCV_VALUE} PARENT_SCOPE)
+
+ if (NOT NAME MATCHES "^(PREFIX|ABIFLAGS|CONFIGDIR|INCLUDES|LIBS|SOABI)$")
+ return()
+ endif()
+
+ if (_${_PYTHON_PREFIX}_CONFIG)
+ if (NAME STREQUAL "SOABI")
+ set (config_flag "--extension-suffix")
+ else()
+ set (config_flag "--${NAME}")
+ endif()
+ string (TOLOWER "${config_flag}" config_flag)
+ execute_process (COMMAND "${_${_PYTHON_PREFIX}_CONFIG}" ${config_flag}
+ RESULT_VARIABLE _result
+ OUTPUT_VARIABLE _values
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (_result)
+ unset (_values)
+ else()
+ if (NAME STREQUAL "INCLUDES")
+ # do some clean-up
+ string (REGEX MATCHALL "(-I|-iwithsysroot)[ ]*[^ ]+" _values "${_values}")
+ string (REGEX REPLACE "(-I|-iwithsysroot)[ ]*" "" _values "${_values}")
+ if (_values) # Behavior change in CMake 3.14
+ list (REMOVE_DUPLICATES _values)
+ endif ()
+ elseif (NAME STREQUAL "SOABI")
+ # clean-up: remove prefix character and suffix
+ if (_values MATCHES "^(\\.${CMAKE_SHARED_LIBRARY_SUFFIX}|\\.so|\\.pyd)$")
+ set(_values "")
+ else()
+ string (REGEX REPLACE "^[.-](.+)(${CMAKE_SHARED_LIBRARY_SUFFIX}|\\.(so|pyd))$" "\\1" _values "${_values}")
+ endif()
+ endif()
+ endif()
+ endif()
+
+ if (_${_PYTHON_PREFIX}_EXECUTABLE AND NOT CMAKE_CROSSCOMPILING)
+ if (NAME STREQUAL "PREFIX")
+ execute_process (COMMAND ${_${_PYTHON_PREFIX}_INTERPRETER_LAUNCHER} "${_${_PYTHON_PREFIX}_EXECUTABLE}" -c "import sys\ntry:\n from distutils import sysconfig\n sys.stdout.write(';'.join([sysconfig.PREFIX,sysconfig.EXEC_PREFIX,sysconfig.BASE_EXEC_PREFIX]))\nexcept Exception:\n import sysconfig\n sys.stdout.write(';'.join([sysconfig.get_config_var('base') or '', sysconfig.get_config_var('installed_base') or '']))"
+ RESULT_VARIABLE _result
+ OUTPUT_VARIABLE _values
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (_result)
+ unset (_values)
+ else()
+ if (_values) # Behavior change in CMake 3.14
+ list (REMOVE_DUPLICATES _values)
+ endif ()
+ endif()
+ elseif (NAME STREQUAL "INCLUDES")
+ if (WIN32)
+ set (_scheme "nt")
+ else()
+ set (_scheme "posix_prefix")
+ endif()
+ execute_process (COMMAND ${_${_PYTHON_PREFIX}_INTERPRETER_LAUNCHER} "${_${_PYTHON_PREFIX}_EXECUTABLE}" -c
+ "import sys\ntry:\n from distutils import sysconfig\n sys.stdout.write(';'.join([sysconfig.get_python_inc(plat_specific=True),sysconfig.get_python_inc(plat_specific=False)]))\nexcept Exception:\n import sysconfig\n sys.stdout.write(';'.join([sysconfig.get_path('platinclude'),sysconfig.get_path('platinclude','${_scheme}'),sysconfig.get_path('include'),sysconfig.get_path('include','${_scheme}')]))"
+ RESULT_VARIABLE _result
+ OUTPUT_VARIABLE _values
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (_result)
+ unset (_values)
+ else()
+ if (_values) # Behavior change in CMake 3.14
+ list (REMOVE_DUPLICATES _values)
+ endif ()
+ endif()
+ elseif (NAME STREQUAL "SOABI")
+ execute_process (COMMAND ${_${_PYTHON_PREFIX}_INTERPRETER_LAUNCHER} "${_${_PYTHON_PREFIX}_EXECUTABLE}" -c
+ "import sys\ntry:\n from distutils import sysconfig\n sys.stdout.write(';'.join([sysconfig.get_config_var('SOABI') or '',sysconfig.get_config_var('EXT_SUFFIX') or '',sysconfig.get_config_var('SO') or '']))\nexcept Exception:\n import sysconfig;sys.stdout.write(';'.join([sysconfig.get_config_var('SOABI') or '',sysconfig.get_config_var('EXT_SUFFIX') or '',sysconfig.get_config_var('SO') or '']))"
+ RESULT_VARIABLE _result
+ OUTPUT_VARIABLE _soabi
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (_result)
+ unset (_values)
+ else()
+ foreach (_item IN LISTS _soabi)
+ if (_item)
+ set (_values "${_item}")
+ break()
+ endif()
+ endforeach()
+ if (_values)
+ # clean-up: remove prefix character and suffix
+ if (_values MATCHES "^(\\.${CMAKE_SHARED_LIBRARY_SUFFIX}|\\.so|\\.pyd)$")
+ set(_values "")
+ else()
+ string (REGEX REPLACE "^[.-](.+)(${CMAKE_SHARED_LIBRARY_SUFFIX}|\\.(so|pyd))$" "\\1" _values "${_values}")
+ endif()
+ endif()
+ endif()
+ else()
+ set (config_flag "${NAME}")
+ if (NAME STREQUAL "CONFIGDIR")
+ set (config_flag "LIBPL")
+ endif()
+ execute_process (COMMAND ${_${_PYTHON_PREFIX}_INTERPRETER_LAUNCHER} "${_${_PYTHON_PREFIX}_EXECUTABLE}" -c
+ "import sys\ntry:\n from distutils import sysconfig\n sys.stdout.write(sysconfig.get_config_var('${config_flag}'))\nexcept Exception:\n import sysconfig\n sys.stdout.write(sysconfig.get_config_var('${config_flag}'))"
+ RESULT_VARIABLE _result
+ OUTPUT_VARIABLE _values
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (_result)
+ unset (_values)
+ endif()
+ endif()
+ endif()
+
+ if (NAME STREQUAL "ABIFLAGS" OR NAME STREQUAL "SOABI")
+ set (${_PYTHON_PGCV_VALUE} "${_values}" PARENT_SCOPE)
+ return()
+ endif()
+
+ if (NOT _values OR _values STREQUAL "None")
+ return()
+ endif()
+
+ if (NAME STREQUAL "LIBS")
+ # do some clean-up
+ string (REGEX MATCHALL "-(l|framework)[ ]*[^ ]+" _values "${_values}")
+ # remove elements relative to python library itself
+ list (FILTER _values EXCLUDE REGEX "-lpython")
+ if (_values) # Behavior change in CMake 3.14
+ list (REMOVE_DUPLICATES _values)
+ endif ()
+ endif()
+
+ if (WIN32 AND NAME MATCHES "^(PREFIX|CONFIGDIR|INCLUDES)$")
+ file (TO_CMAKE_PATH "${_values}" _values)
+ endif()
+
+ set (${_PYTHON_PGCV_VALUE} "${_values}" PARENT_SCOPE)
+endfunction()
+
+function (_PYTHON_GET_VERSION)
+ cmake_parse_arguments (PARSE_ARGV 0 _PGV "LIBRARY;INCLUDE" "PREFIX" "")
+
+ unset (${_PGV_PREFIX}VERSION PARENT_SCOPE)
+ unset (${_PGV_PREFIX}VERSION_MAJOR PARENT_SCOPE)
+ unset (${_PGV_PREFIX}VERSION_MINOR PARENT_SCOPE)
+ unset (${_PGV_PREFIX}VERSION_PATCH PARENT_SCOPE)
+ unset (${_PGV_PREFIX}ABI PARENT_SCOPE)
+
+ if (_PGV_LIBRARY)
+ # retrieve version and abi from library name
+ if (_${_PYTHON_PREFIX}_LIBRARY_RELEASE)
+ get_filename_component (library_name "${_${_PYTHON_PREFIX}_LIBRARY_RELEASE}" NAME)
+ # extract version from library name
+ if (library_name MATCHES "python([23])([0-9]+)")
+ set (${_PGV_PREFIX}VERSION_MAJOR "${CMAKE_MATCH_1}" PARENT_SCOPE)
+ set (${_PGV_PREFIX}VERSION_MINOR "${CMAKE_MATCH_2}" PARENT_SCOPE)
+ set (${_PGV_PREFIX}VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}" PARENT_SCOPE)
+ set (${_PGV_PREFIX}ABI "" PARENT_SCOPE)
+ elseif (library_name MATCHES "python([23])\\.([0-9]+)([dmu]*)")
+ set (${_PGV_PREFIX}VERSION_MAJOR "${CMAKE_MATCH_1}" PARENT_SCOPE)
+ set (${_PGV_PREFIX}VERSION_MINOR "${CMAKE_MATCH_2}" PARENT_SCOPE)
+ set (${_PGV_PREFIX}VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}" PARENT_SCOPE)
+ set (${_PGV_PREFIX}ABI "${CMAKE_MATCH_3}" PARENT_SCOPE)
+ elseif (library_name MATCHES "pypy(3)?-c")
+ set (version "${CMAKE_MATCH_1}")
+ if (version EQUAL "3")
+ set (${_PGV_PREFIX}VERSION_MAJOR "3" PARENT_SCOPE)
+ set (${_PGV_PREFIX}VERSION "3" PARENT_SCOPE)
+ else()
+ set (${_PGV_PREFIX}VERSION_MAJOR "2" PARENT_SCOPE)
+ set (${_PGV_PREFIX}VERSION "2" PARENT_SCOPE)
+ endif()
+ set (${_PGV_PREFIX}ABI "" PARENT_SCOPE)
+ endif()
+ endif()
+ else()
+ if (_${_PYTHON_PREFIX}_INCLUDE_DIR)
+ # retrieve version from header file
+ file (STRINGS "${_${_PYTHON_PREFIX}_INCLUDE_DIR}/patchlevel.h" version
+ REGEX "^#define[ \t]+PY_VERSION[ \t]+\"[^\"]+\"")
+ string (REGEX REPLACE "^#define[ \t]+PY_VERSION[ \t]+\"([^\"]+)\".*" "\\1"
+ version "${version}")
+ string (REGEX MATCHALL "[0-9]+" versions "${version}")
+ list (GET versions 0 version_major)
+ list (GET versions 1 version_minor)
+ list (GET versions 2 version_patch)
+
+ set (${_PGV_PREFIX}VERSION "${version_major}.${version_minor}.${version_patch}" PARENT_SCOPE)
+ set (${_PGV_PREFIX}VERSION_MAJOR ${version_major} PARENT_SCOPE)
+ set (${_PGV_PREFIX}VERSION_MINOR ${version_minor} PARENT_SCOPE)
+ set (${_PGV_PREFIX}VERSION_PATCH ${version_patch} PARENT_SCOPE)
+
+ # compute ABI flags
+ if (version_major VERSION_GREATER "2")
+ file (STRINGS "${_${_PYTHON_PREFIX}_INCLUDE_DIR}/pyconfig.h" config REGEX "(Py_DEBUG|WITH_PYMALLOC|Py_UNICODE_SIZE|MS_WIN32)")
+ set (abi)
+ if (config MATCHES "#[ ]*define[ ]+MS_WIN32")
+ # ABI not used on Windows
+ set (abi "")
+ else()
+ if (NOT config)
+ # pyconfig.h can be a wrapper to a platform specific pyconfig.h
+ # In this case, try to identify ABI from include directory
+ if (_${_PYTHON_PREFIX}_INCLUDE_DIR MATCHES "python${version_major}\\.${version_minor}+([dmu]*)")
+ set (abi "${CMAKE_MATCH_1}")
+ else()
+ set (abi "")
+ endif()
+ else()
+ if (config MATCHES "#[ ]*define[ ]+Py_DEBUG[ ]+1")
+ string (APPEND abi "d")
+ endif()
+ if (config MATCHES "#[ ]*define[ ]+WITH_PYMALLOC[ ]+1")
+ string (APPEND abi "m")
+ endif()
+ if (config MATCHES "#[ ]*define[ ]+Py_UNICODE_SIZE[ ]+4")
+ string (APPEND abi "u")
+ endif()
+ endif()
+ set (${_PGV_PREFIX}ABI "${abi}" PARENT_SCOPE)
+ endif()
+ else()
+ # ABI not supported
+ set (${_PGV_PREFIX}ABI "" PARENT_SCOPE)
+ endif()
+ endif()
+ endif()
+endfunction()
+
+function (_PYTHON_GET_LAUNCHER _PYTHON_PGL_NAME)
+ cmake_parse_arguments (PARSE_ARGV 1 _PGL "INTERPRETER;COMPILER" "" "")
+
+ unset (${_PYTHON_PGL_NAME} PARENT_SCOPE)
+
+ if ((_PGL_INTERPRETER AND NOT _${_PYTHON_PREFIX}_EXECUTABLE)
+ OR (_PGL_COMPILER AND NOT _${_PYTHON_PREFIX}_COMPILER))
+ return()
+ endif()
+
+ if ("IronPython" IN_LIST _${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS
+ AND NOT CMAKE_SYSTEM_NAME MATCHES "Windows|Linux")
+ if (_PGL_INTERPRETER)
+ get_filename_component (name "${_${_PYTHON_PREFIX}_EXECUTABLE}" NAME)
+ get_filename_component (ext "${_${_PYTHON_PREFIX}_EXECUTABLE}" LAST_EXT)
+ if (name IN_LIST _${_PYTHON_PREFIX}_IRON_PYTHON_INTERPRETER_NAMES
+ AND ext STREQUAL ".exe")
+ set (${_PYTHON_PGL_NAME} "${${_PYTHON_PREFIX}_DOTNET_LAUNCHER}" PARENT_SCOPE)
+ endif()
+ else()
+ get_filename_component (name "${_${_PYTHON_PREFIX}_COMPILER}" NAME)
+ get_filename_component (ext "${_${_PYTHON_PREFIX}_COMPILER}" LAST_EXT)
+ if (name IN_LIST _${_PYTHON_PREFIX}_IRON_PYTHON_COMPILER_NAMES
+ AND ext STREQUAL ".exe")
+ set (${_PYTHON_PGL_NAME} "${${_PYTHON_PREFIX}_DOTNET_LAUNCHER}" PARENT_SCOPE)
+ endif()
+ endif()
+ endif()
+endfunction()
+
+
+function (_PYTHON_VALIDATE_INTERPRETER)
+ if (NOT _${_PYTHON_PREFIX}_EXECUTABLE)
+ return()
+ endif()
+
+ cmake_parse_arguments (PARSE_ARGV 0 _PVI "IN_RANGE;EXACT;CHECK_EXISTS" "VERSION" "")
+
+ if (_PVI_CHECK_EXISTS AND NOT EXISTS "${_${_PYTHON_PREFIX}_EXECUTABLE}")
+ # interpreter does not exist anymore
+ set (_${_PYTHON_PREFIX}_Interpreter_REASON_FAILURE "Cannot find the interpreter \"${_${_PYTHON_PREFIX}_EXECUTABLE}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_EXECUTABLE PROPERTY VALUE "${_PYTHON_PREFIX}_EXECUTABLE-NOTFOUND")
+ return()
+ endif()
+
+ _python_get_launcher (launcher INTERPRETER)
+
+ # validate ABI compatibility
+ if (DEFINED _${_PYTHON_PREFIX}_FIND_ABI)
+ execute_process (COMMAND ${launcher} "${_${_PYTHON_PREFIX}_EXECUTABLE}" -c
+ "import sys; sys.stdout.write(sys.abiflags)"
+ RESULT_VARIABLE result
+ OUTPUT_VARIABLE abi
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (result)
+ # assume ABI is not supported
+ set (abi "")
+ endif()
+ if (NOT abi IN_LIST _${_PYTHON_PREFIX}_ABIFLAGS)
+ # incompatible ABI
+ set (_${_PYTHON_PREFIX}_Interpreter_REASON_FAILURE "Wrong ABI for the interpreter \"${_${_PYTHON_PREFIX}_EXECUTABLE}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_EXECUTABLE PROPERTY VALUE "${_PYTHON_PREFIX}_EXECUTABLE-NOTFOUND")
+ return()
+ endif()
+ endif()
+
+ if (_PVI_IN_RANGE OR _PVI_VERSION)
+ # retrieve full version
+ execute_process (COMMAND ${launcher} "${_${_PYTHON_PREFIX}_EXECUTABLE}" -c
+ "import sys; sys.stdout.write('.'.join([str(x) for x in sys.version_info[:3]]))"
+ RESULT_VARIABLE result
+ OUTPUT_VARIABLE version
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (result)
+ # interpreter is not usable
+ set (_${_PYTHON_PREFIX}_Interpreter_REASON_FAILURE "Cannot use the interpreter \"${_${_PYTHON_PREFIX}_EXECUTABLE}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_EXECUTABLE PROPERTY VALUE "${_PYTHON_PREFIX}_EXECUTABLE-NOTFOUND")
+ return()
+ endif()
+
+ if (_PVI_VERSION)
+ # check against specified version
+ ## compute number of components for version
+ string (REGEX REPLACE "[^.]" "" dots "${_PVI_VERSION}")
+ ## add one dot because there is one dot less than there are components
+ string (LENGTH "${dots}." count)
+ if (count GREATER 3)
+ set (count 3)
+ endif()
+ set (version_regex "^[0-9]+")
+ if (count EQUAL 3)
+ string (APPEND version_regex "\\.[0-9]+\\.[0-9]+")
+ elseif (count EQUAL 2)
+ string (APPEND version_regex "\\.[0-9]+")
+ endif()
+ # extract needed range
+ string (REGEX MATCH "${version_regex}" version "${version}")
+
+ if (_PVI_EXACT AND NOT version VERSION_EQUAL _PVI_VERSION)
+ # interpreter has wrong version
+ set (_${_PYTHON_PREFIX}_Interpreter_REASON_FAILURE "Wrong version for the interpreter \"${_${_PYTHON_PREFIX}_EXECUTABLE}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_EXECUTABLE PROPERTY VALUE "${_PYTHON_PREFIX}_EXECUTABLE-NOTFOUND")
+ return()
+ else()
+ # check that version is OK
+ string(REGEX REPLACE "^([0-9]+)\\.?.*$" "\\1" major_version "${version}")
+ string(REGEX REPLACE "^([0-9]+)\\.?.*$" "\\1" expected_major_version "${_PVI_VERSION}")
+ if (NOT major_version VERSION_EQUAL expected_major_version
+ OR NOT version VERSION_GREATER_EQUAL _PVI_VERSION)
+ set (_${_PYTHON_PREFIX}_Interpreter_REASON_FAILURE "Wrong version for the interpreter \"${_${_PYTHON_PREFIX}_EXECUTABLE}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_EXECUTABLE PROPERTY VALUE "${_PYTHON_PREFIX}_EXECUTABLE-NOTFOUND")
+ return()
+ endif()
+ endif()
+ endif()
+
+ if (_PVI_IN_RANGE)
+ # check if version is in the requested range
+ find_package_check_version ("${version}" in_range HANDLE_VERSION_RANGE)
+ if (NOT in_range)
+ # interpreter has invalid version
+ set (_${_PYTHON_PREFIX}_Interpreter_REASON_FAILURE "Wrong version for the interpreter \"${_${_PYTHON_PREFIX}_EXECUTABLE}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_EXECUTABLE PROPERTY VALUE "${_PYTHON_PREFIX}_EXECUTABLE-NOTFOUND")
+ return()
+ endif()
+ endif()
+ else()
+ get_filename_component (python_name "${_${_PYTHON_PREFIX}_EXECUTABLE}" NAME)
+ if (NOT python_name STREQUAL "python${_${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR}${CMAKE_EXECUTABLE_SUFFIX}")
+ # the executable found does not have the version in its name
+ # ensure major version is OK
+ execute_process (COMMAND ${launcher} "${_${_PYTHON_PREFIX}_EXECUTABLE}" -c
+ "import sys; sys.stdout.write(str(sys.version_info[0]))"
+ RESULT_VARIABLE result
+ OUTPUT_VARIABLE version
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (result OR NOT version EQUAL _${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR)
+ # interpreter not usable or has wrong major version
+ if (result)
+ set (_${_PYTHON_PREFIX}_Interpreter_REASON_FAILURE "Cannot use the interpreter \"${_${_PYTHON_PREFIX}_EXECUTABLE}\"" PARENT_SCOPE)
+ else()
+ set (_${_PYTHON_PREFIX}_Interpreter_REASON_FAILURE "Wrong major version for the interpreter \"${_${_PYTHON_PREFIX}_EXECUTABLE}\"" PARENT_SCOPE)
+ endif()
+ set_property (CACHE _${_PYTHON_PREFIX}_EXECUTABLE PROPERTY VALUE "${_PYTHON_PREFIX}_EXECUTABLE-NOTFOUND")
+ return()
+ endif()
+ endif()
+ endif()
+
+ if (CMAKE_SIZEOF_VOID_P AND ("Development.Module" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS
+ OR "Development.Embed" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS)
+ AND NOT CMAKE_CROSSCOMPILING)
+ # In this case, interpreter must have same architecture as environment
+ execute_process (COMMAND ${launcher} "${_${_PYTHON_PREFIX}_EXECUTABLE}" -c
+ "import sys, struct; sys.stdout.write(str(struct.calcsize(\"P\")))"
+ RESULT_VARIABLE result
+ OUTPUT_VARIABLE size
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (result OR NOT size EQUAL CMAKE_SIZEOF_VOID_P)
+ # interpreter not usable or has wrong architecture
+ if (result)
+ set (_${_PYTHON_PREFIX}_Interpreter_REASON_FAILURE "Cannot use the interpreter \"${_${_PYTHON_PREFIX}_EXECUTABLE}\"" PARENT_SCOPE)
+ else()
+ set (_${_PYTHON_PREFIX}_Interpreter_REASON_FAILURE "Wrong architecture for the interpreter \"${_${_PYTHON_PREFIX}_EXECUTABLE}\"" PARENT_SCOPE)
+ endif()
+ set_property (CACHE _${_PYTHON_PREFIX}_EXECUTABLE PROPERTY VALUE "${_PYTHON_PREFIX}_EXECUTABLE-NOTFOUND")
+ return()
+ endif()
+ endif()
+endfunction()
+
+
+function (_PYTHON_VALIDATE_COMPILER)
+ if (NOT _${_PYTHON_PREFIX}_COMPILER)
+ return()
+ endif()
+
+ cmake_parse_arguments (PARSE_ARGV 0 _PVC "IN_RANGE;EXACT;CHECK_EXISTS" "VERSION" "")
+
+ if (_PVC_CHECK_EXISTS AND NOT EXISTS "${_${_PYTHON_PREFIX}_COMPILER}")
+ # Compiler does not exist anymore
+ set (_${_PYTHON_PREFIX}_Compiler_REASON_FAILURE "Cannot find the compiler \"${_${_PYTHON_PREFIX}_COMPILER}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_COMPILER PROPERTY VALUE "${_PYTHON_PREFIX}_COMPILER-NOTFOUND")
+ return()
+ endif()
+
+ _python_get_launcher (launcher COMPILER)
+
+ # retrieve python environment version from compiler
+ set (working_dir "${CMAKE_CURRENT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/PythonCompilerVersion.dir")
+ file (WRITE "${working_dir}/version.py" "import sys; sys.stdout.write('.'.join([str(x) for x in sys.version_info[:3]]))\n")
+ execute_process (COMMAND ${launcher} "${_${_PYTHON_PREFIX}_COMPILER}"
+ ${_${_PYTHON_PREFIX}_IRON_PYTHON_COMPILER_ARCH_FLAGS}
+ /target:exe /embed "${working_dir}/version.py"
+ WORKING_DIRECTORY "${working_dir}"
+ OUTPUT_QUIET
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ get_filename_component (ir_dir "${_${_PYTHON_PREFIX}_COMPILER}" DIRECTORY)
+ execute_process (COMMAND "${CMAKE_COMMAND}" -E env "MONO_PATH=${ir_dir}"
+ ${${_PYTHON_PREFIX}_DOTNET_LAUNCHER} "${working_dir}/version.exe"
+ WORKING_DIRECTORY "${working_dir}"
+ RESULT_VARIABLE result
+ OUTPUT_VARIABLE version
+ ERROR_QUIET)
+ file (REMOVE_RECURSE "${working_dir}")
+ if (result)
+ # compiler is not usable
+ set (_${_PYTHON_PREFIX}_Compiler_REASON_FAILURE "Cannot use the compiler \"${_${_PYTHON_PREFIX}_COMPILER}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_COMPILER PROPERTY VALUE "${_PYTHON_PREFIX}_COMPILER-NOTFOUND")
+ return()
+ endif()
+
+ if (_PVC_VERSION OR _PVC_IN_RANGE)
+ if (_PVC_VERSION)
+ # check against specified version
+ ## compute number of components for version
+ string (REGEX REPLACE "[^.]" "" dots "${_PVC_VERSION}")
+ ## add one dot because there is one dot less than there are components
+ string (LENGTH "${dots}." count)
+ if (count GREATER 3)
+ set (count 3)
+ endif()
+ set (version_regex "^[0-9]+")
+ if (count EQUAL 3)
+ string (APPEND version_regex "\\.[0-9]+\\.[0-9]+")
+ elseif (count EQUAL 2)
+ string (APPEND version_regex "\\.[0-9]+")
+ endif()
+ # extract needed range
+ string (REGEX MATCH "${version_regex}" version "${version}")
+
+ if (_PVC_EXACT AND NOT version VERSION_EQUAL _PVC_VERSION)
+ # compiler has wrong version
+ set (_${_PYTHON_PREFIX}_Compiler_REASON_FAILURE "Wrong version for the compiler \"${_${_PYTHON_PREFIX}_COMPILER}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_COMPILER PROPERTY VALUE "${_PYTHON_PREFIX}_COMPILER-NOTFOUND")
+ return()
+ else()
+ # check that version is OK
+ string(REGEX REPLACE "^([0-9]+)\\.?.*$" "\\1" major_version "${version}")
+ string(REGEX REPLACE "^([0-9]+)\\.?.*$" "\\1" expected_major_version "${_PVC_VERSION}")
+ if (NOT major_version VERSION_EQUAL expected_major_version
+ OR NOT version VERSION_GREATER_EQUAL _PVC_VERSION)
+ set (_${_PYTHON_PREFIX}_Compiler_REASON_FAILURE "Wrong version for the compiler \"${_${_PYTHON_PREFIX}_COMPILER}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_COMPILER PROPERTY VALUE "${_PYTHON_PREFIX}_COMPILER-NOTFOUND")
+ return()
+ endif()
+ endif()
+ endif()
+
+ if (_PVC_IN_RANGE)
+ # check if version is in the requested range
+ find_package_check_version ("${version}" in_range HANDLE_VERSION_RANGE)
+ if (NOT in_range)
+ # compiler has invalid version
+ set (_${_PYTHON_PREFIX}_Compiler_REASON_FAILURE "Wrong version for the compiler \"${_${_PYTHON_PREFIX}_COMPILER}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_COMPILER PROPERTY VALUE "${_PYTHON_PREFIX}_COMPILER-NOTFOUND")
+ return()
+ endif()
+ endif()
+ else()
+ string(REGEX REPLACE "^([0-9]+)\\.?.*$" "\\1" major_version "${version}")
+ if (NOT major_version EQUAL _${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR)
+ # Compiler has wrong major version
+ set (_${_PYTHON_PREFIX}_Compiler_REASON_FAILURE "Wrong major version for the compiler \"${_${_PYTHON_PREFIX}_COMPILER}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_COMPILER PROPERTY VALUE "${_PYTHON_PREFIX}_COMPILER-NOTFOUND")
+ return()
+ endif()
+ endif()
+endfunction()
+
+
+function (_PYTHON_VALIDATE_LIBRARY)
+ if (NOT _${_PYTHON_PREFIX}_LIBRARY_RELEASE)
+ unset (_${_PYTHON_PREFIX}_LIBRARY_DEBUG)
+ return()
+ endif()
+
+ cmake_parse_arguments (PARSE_ARGV 0 _PVL "IN_RANGE;EXACT;CHECK_EXISTS" "VERSION" "")
+
+ if (_PVL_CHECK_EXISTS AND NOT EXISTS "${_${_PYTHON_PREFIX}_LIBRARY_RELEASE}")
+ # library does not exist anymore
+ set (_${_PYTHON_PREFIX}_Development_REASON_FAILURE "Cannot find the library \"${_${_PYTHON_PREFIX}_LIBRARY_RELEASE}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_LIBRARY_RELEASE PROPERTY VALUE "${_PYTHON_PREFIX}_LIBRARY_RELEASE-NOTFOUND")
+ if (WIN32)
+ set_property (CACHE _${_PYTHON_PREFIX}_LIBRARY_DEBUG PROPERTY VALUE "${_PYTHON_PREFIX}_LIBRARY_DEBUG-NOTFOUND")
+ endif()
+ set_property (CACHE _${_PYTHON_PREFIX}_INCLUDE_DIR PROPERTY VALUE "${_PYTHON_PREFIX}_INCLUDE_DIR-NOTFOUND")
+ return()
+ endif()
+
+ # retrieve version and abi from library name
+ _python_get_version (LIBRARY PREFIX lib_)
+
+ if (DEFINED _${_PYTHON_PREFIX}_FIND_ABI AND NOT lib_ABI IN_LIST _${_PYTHON_PREFIX}_ABIFLAGS)
+ # incompatible ABI
+ set (_${_PYTHON_PREFIX}_Development_REASON_FAILURE "Wrong ABI for the library \"${_${_PYTHON_PREFIX}_LIBRARY_RELEASE}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_LIBRARY_RELEASE PROPERTY VALUE "${_PYTHON_PREFIX}_LIBRARY_RELEASE-NOTFOUND")
+ else()
+ if (_PVL_VERSION OR _PVL_IN_RANGE)
+ if (_PVL_VERSION)
+ # library has only major.minor information
+ string (REGEX MATCH "[0-9](\\.[0-9]+)?" version "${_PVL_VERSION}")
+ if ((_PVL_EXACT AND NOT lib_VERSION VERSION_EQUAL version) OR (lib_VERSION VERSION_LESS version))
+ # library has wrong version
+ set (_${_PYTHON_PREFIX}_Development_REASON_FAILURE "Wrong version for the library \"${_${_PYTHON_PREFIX}_LIBRARY_RELEASE}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_LIBRARY_RELEASE PROPERTY VALUE "${_PYTHON_PREFIX}_LIBRARY_RELEASE-NOTFOUND")
+ endif()
+ endif()
+
+ if (_${_PYTHON_PREFIX}_LIBRARY_RELEASE AND _PVL_IN_RANGE)
+ # check if library version is in the requested range
+ find_package_check_version ("${lib_VERSION}" in_range HANDLE_VERSION_RANGE)
+ if (NOT in_range)
+ # library has wrong version
+ set (_${_PYTHON_PREFIX}_Development_REASON_FAILURE "Wrong version for the library \"${_${_PYTHON_PREFIX}_LIBRARY_RELEASE}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_LIBRARY_RELEASE PROPERTY VALUE "${_PYTHON_PREFIX}_LIBRARY_RELEASE-NOTFOUND")
+ endif()
+ endif()
+ else()
+ if (NOT lib_VERSION_MAJOR VERSION_EQUAL _${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR)
+ # library has wrong major version
+ set (_${_PYTHON_PREFIX}_Development_REASON_FAILURE "Wrong major version for the library \"${_${_PYTHON_PREFIX}_LIBRARY_RELEASE}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_LIBRARY_RELEASE PROPERTY VALUE "${_PYTHON_PREFIX}_LIBRARY_RELEASE-NOTFOUND")
+ endif()
+ endif()
+ endif()
+
+ if (NOT _${_PYTHON_PREFIX}_LIBRARY_RELEASE)
+ if (WIN32)
+ set_property (CACHE _${_PYTHON_PREFIX}_LIBRARY_DEBUG PROPERTY VALUE "${_PYTHON_PREFIX}_LIBRARY_DEBUG-NOTFOUND")
+ endif()
+ unset (_${_PYTHON_PREFIX}_RUNTIME_LIBRARY_RELEASE CACHE)
+ unset (_${_PYTHON_PREFIX}_RUNTIME_LIBRARY_DEBUG CACHE)
+ set_property (CACHE _${_PYTHON_PREFIX}_INCLUDE_DIR PROPERTY VALUE "${_PYTHON_PREFIX}_INCLUDE_DIR-NOTFOUND")
+ endif()
+endfunction()
+
+
+function (_PYTHON_VALIDATE_INCLUDE_DIR)
+ if (NOT _${_PYTHON_PREFIX}_INCLUDE_DIR)
+ return()
+ endif()
+
+ cmake_parse_arguments (PARSE_ARGV 0 _PVID "IN_RANGE;EXACT;CHECK_EXISTS" "VERSION" "")
+
+ if (_PVID_CHECK_EXISTS AND NOT EXISTS "${_${_PYTHON_PREFIX}_INCLUDE_DIR}")
+ # include directory does not exist anymore
+ set (_${_PYTHON_PREFIX}_Development_REASON_FAILURE "Cannot find the directory \"${_${_PYTHON_PREFIX}_INCLUDE_DIR}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_INCLUDE_DIR PROPERTY VALUE "${_PYTHON_PREFIX}_INCLUDE_DIR-NOTFOUND")
+ return()
+ endif()
+
+ # retrieve version from header file
+ _python_get_version (INCLUDE PREFIX inc_)
+
+ if (DEFINED _${_PYTHON_PREFIX}_FIND_ABI AND NOT inc_ABI IN_LIST _${_PYTHON_PREFIX}_ABIFLAGS)
+ # incompatible ABI
+ set (_${_PYTHON_PREFIX}_Development_REASON_FAILURE "Wrong ABI for the directory \"${_${_PYTHON_PREFIX}_INCLUDE_DIR}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_INCLUDE_DIR PROPERTY VALUE "${_PYTHON_PREFIX}_INCLUDE_DIR-NOTFOUND")
+ else()
+ if (_PVID_VERSION OR _PVID_IN_RANGE)
+ if (_PVID_VERSION)
+ if ((_PVID_EXACT AND NOT inc_VERSION VERSION_EQUAL _PVID_VERSION) OR (inc_VERSION VERSION_LESS _PVID_VERSION))
+ # include dir has wrong version
+ set (_${_PYTHON_PREFIX}_Development_REASON_FAILURE "Wrong version for the directory \"${_${_PYTHON_PREFIX}_INCLUDE_DIR}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_INCLUDE_DIR PROPERTY VALUE "${_PYTHON_PREFIX}_INCLUDE_DIR-NOTFOUND")
+ endif()
+ endif()
+
+ if (_${_PYTHON_PREFIX}_INCLUDE_DIR AND _PVID_IN_RANGE)
+ # check if include dir is in the requested range
+ find_package_check_version ("${inc_VERSION}" in_range HANDLE_VERSION_RANGE)
+ if (NOT in_range)
+ # include dir has wrong version
+ set (_${_PYTHON_PREFIX}_Development_REASON_FAILURE "Wrong version for the directory \"${_${_PYTHON_PREFIX}_INCLUDE_DIR}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_INCLUDE_DIR PROPERTY VALUE "${_PYTHON_PREFIX}_INCLUDE_DIR-NOTFOUND")
+ endif()
+ endif()
+ else()
+ if (NOT inc_VERSION_MAJOR VERSION_EQUAL _${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR)
+ # include dir has wrong major version
+ set (_${_PYTHON_PREFIX}_Development_REASON_FAILURE "Wrong major version for the directory \"${_${_PYTHON_PREFIX}_INCLUDE_DIR}\"" PARENT_SCOPE)
+ set_property (CACHE _${_PYTHON_PREFIX}_INCLUDE_DIR PROPERTY VALUE "${_PYTHON_PREFIX}_INCLUDE_DIR-NOTFOUND")
+ endif()
+ endif()
+ endif()
+endfunction()
+
+
+function (_PYTHON_FIND_RUNTIME_LIBRARY _PYTHON_LIB)
+ string (REPLACE "_RUNTIME" "" _PYTHON_LIB "${_PYTHON_LIB}")
+ # look at runtime part on systems supporting it
+ if (CMAKE_SYSTEM_NAME STREQUAL "Windows" OR
+ (CMAKE_SYSTEM_NAME MATCHES "MSYS|CYGWIN"
+ AND ${_PYTHON_LIB} MATCHES "${CMAKE_IMPORT_LIBRARY_SUFFIX}$"))
+ set (CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_SHARED_LIBRARY_SUFFIX})
+ # MSYS has a special syntax for runtime libraries
+ if (CMAKE_SYSTEM_NAME MATCHES "MSYS")
+ list (APPEND CMAKE_FIND_LIBRARY_PREFIXES "msys-")
+ endif()
+ find_library (${ARGV})
+ endif()
+endfunction()
+
+
+function (_PYTHON_SET_LIBRARY_DIRS _PYTHON_SLD_RESULT)
+ unset (_PYTHON_DIRS)
+ set (_PYTHON_LIBS ${ARGN})
+ foreach (_PYTHON_LIB IN LISTS _PYTHON_LIBS)
+ if (${_PYTHON_LIB})
+ get_filename_component (_PYTHON_DIR "${${_PYTHON_LIB}}" DIRECTORY)
+ list (APPEND _PYTHON_DIRS "${_PYTHON_DIR}")
+ endif()
+ endforeach()
+ if (_PYTHON_DIRS) # Behavior change in CMake 3.14
+ list (REMOVE_DUPLICATES _PYTHON_DIRS)
+ endif ()
+ set (${_PYTHON_SLD_RESULT} ${_PYTHON_DIRS} PARENT_SCOPE)
+endfunction()
+
+
+function (_PYTHON_SET_DEVELOPMENT_MODULE_FOUND module)
+ if ("Development.${module}" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS)
+ string(TOUPPER "${module}" id)
+ set (module_found TRUE)
+
+ if ("LIBRARY" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_${id}_ARTIFACTS
+ AND NOT _${_PYTHON_PREFIX}_LIBRARY_RELEASE)
+ set (module_found FALSE)
+ endif()
+ if ("INCLUDE_DIR" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_${id}_ARTIFACTS
+ AND NOT _${_PYTHON_PREFIX}_INCLUDE_DIR)
+ set (module_found FALSE)
+ endif()
+
+ set (${_PYTHON_PREFIX}_Development.${module}_FOUND ${module_found} PARENT_SCOPE)
+ endif()
+endfunction()
+
+
+if (${_PYTHON_PREFIX}_FIND_VERSION_RANGE)
+ # range must include internal major version
+ if (${_PYTHON_PREFIX}_FIND_VERSION_MIN_MAJOR VERSION_GREATER _${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR
+ OR ((${_PYTHON_PREFIX}_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE"
+ AND ${_PYTHON_PREFIX}_FIND_VERSION_MAX VERSION_LESS _${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR)
+ OR (${_PYTHON_PREFIX}_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE"
+ AND ${_PYTHON_PREFIX}_FIND_VERSION_MAX VERSION_LESS_EQUAL _${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR)))
+ _python_display_failure ("Could NOT find ${_PYTHON_PREFIX}: Wrong version range specified is \"${${_PYTHON_PREFIX}_FIND_VERSION_RANGE}\", but expected version range must include major version \"${_${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR}\"")
+
+ cmake_policy(POP)
+ return()
+ endif()
+else()
+ if (DEFINED ${_PYTHON_PREFIX}_FIND_VERSION_MAJOR
+ AND NOT ${_PYTHON_PREFIX}_FIND_VERSION_MAJOR VERSION_EQUAL _${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR)
+ # If major version is specified, it must be the same as internal major version
+ _python_display_failure ("Could NOT find ${_PYTHON_PREFIX}: Wrong major version specified is \"${${_PYTHON_PREFIX}_FIND_VERSION_MAJOR}\", but expected major version is \"${_${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR}\"")
+
+ cmake_policy(POP)
+ return()
+ endif()
+endif()
+
+
+# handle components
+if (NOT ${_PYTHON_PREFIX}_FIND_COMPONENTS)
+ set (${_PYTHON_PREFIX}_FIND_COMPONENTS Interpreter)
+ set (${_PYTHON_PREFIX}_FIND_REQUIRED_Interpreter TRUE)
+endif()
+if ("NumPy" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS)
+ list (APPEND ${_PYTHON_PREFIX}_FIND_COMPONENTS "Interpreter" "Development.Module")
+endif()
+if ("Development" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS)
+ list (APPEND ${_PYTHON_PREFIX}_FIND_COMPONENTS "Development.Module" "Development.Embed")
+endif()
+if (${_PYTHON_PREFIX}_FIND_COMPONENTS) # Behavior change in CMake 3.14
+ list (REMOVE_DUPLICATES ${_PYTHON_PREFIX}_FIND_COMPONENTS)
+endif ()
+foreach (_${_PYTHON_PREFIX}_COMPONENT IN ITEMS Interpreter Compiler Development Development.Module Development.Embed NumPy)
+ set (${_PYTHON_PREFIX}_${_${_PYTHON_PREFIX}_COMPONENT}_FOUND FALSE)
+endforeach()
+if (${_PYTHON_PREFIX}_FIND_REQUIRED_Development)
+ set (${_PYTHON_PREFIX}_FIND_REQUIRED_Development.Module TRUE)
+ set (${_PYTHON_PREFIX}_FIND_REQUIRED_Development.Embed TRUE)
+endif()
+
+unset (_${_PYTHON_PREFIX}_FIND_DEVELOPMENT_ARTIFACTS)
+unset (_${_PYTHON_PREFIX}_FIND_DEVELOPMENT_MODULE_ARTIFACTS)
+unset (_${_PYTHON_PREFIX}_FIND_DEVELOPMENT_EMBED_ARTIFACTS)
+if ("Development.Module" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS)
+ if (CMAKE_SYSTEM_NAME MATCHES "^(Windows.*|CYGWIN|MSYS)$")
+ list (APPEND _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_MODULE_ARTIFACTS "LIBRARY")
+ endif()
+ list (APPEND _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_MODULE_ARTIFACTS "INCLUDE_DIR")
+endif()
+if ("Development.Embed" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS)
+ list (APPEND _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_EMBED_ARTIFACTS "LIBRARY" "INCLUDE_DIR")
+endif()
+set (_${_PYTHON_PREFIX}_FIND_DEVELOPMENT_ARTIFACTS ${_${_PYTHON_PREFIX}_FIND_DEVELOPMENT_MODULE_ARTIFACTS} ${_${_PYTHON_PREFIX}_FIND_DEVELOPMENT_EMBED_ARTIFACTS})
+if (_${_PYTHON_PREFIX}_FIND_DEVELOPMENT_ARTIFACTS) # Behavior change in CMake 3.14
+ list (REMOVE_DUPLICATES _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_ARTIFACTS)
+endif ()
+
+# Set versions to search
+## default: search any version
+set (_${_PYTHON_PREFIX}_FIND_VERSIONS ${_${_PYTHON_PREFIX}_VERSIONS})
+unset (_${_PYTHON_PREFIX}_FIND_VERSION_EXACT)
+
+if (${_PYTHON_PREFIX}_FIND_VERSION_RANGE)
+ unset (_${_PYTHON_PREFIX}_FIND_VERSIONS)
+ foreach (_${_PYTHON_PREFIX}_VERSION IN LISTS _${_PYTHON_PREFIX}_VERSIONS)
+ if ((${_PYTHON_PREFIX}_FIND_VERSION_RANGE_MIN STREQUAL "INCLUDE"
+ AND _${_PYTHON_PREFIX}_VERSION VERSION_GREATER_EQUAL ${_PYTHON_PREFIX}_FIND_VERSION_MIN)
+ AND ((${_PYTHON_PREFIX}_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE"
+ AND _${_PYTHON_PREFIX}_VERSION VERSION_LESS_EQUAL ${_PYTHON_PREFIX}_FIND_VERSION_MAX)
+ OR (${_PYTHON_PREFIX}_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE"
+ AND _${_PYTHON_PREFIX}_VERSION VERSION_LESS ${_PYTHON_PREFIX}_FIND_VERSION_MAX)))
+ list (APPEND _${_PYTHON_PREFIX}_FIND_VERSIONS ${_${_PYTHON_PREFIX}_VERSION})
+ endif()
+ endforeach()
+else()
+ if (${_PYTHON_PREFIX}_FIND_VERSION_COUNT GREATER 1)
+ if (${_PYTHON_PREFIX}_FIND_VERSION_EXACT)
+ set (_${_PYTHON_PREFIX}_FIND_VERSION_EXACT "EXACT")
+ set (_${_PYTHON_PREFIX}_FIND_VERSIONS ${${_PYTHON_PREFIX}_FIND_VERSION_MAJOR}.${${_PYTHON_PREFIX}_FIND_VERSION_MINOR})
+ else()
+ unset (_${_PYTHON_PREFIX}_FIND_VERSIONS)
+ # add all compatible versions
+ foreach (_${_PYTHON_PREFIX}_VERSION IN LISTS _${_PYTHON_PREFIX}_VERSIONS)
+ if (_${_PYTHON_PREFIX}_VERSION VERSION_GREATER_EQUAL "${${_PYTHON_PREFIX}_FIND_VERSION_MAJOR}.${${_PYTHON_PREFIX}_FIND_VERSION_MINOR}")
+ list (APPEND _${_PYTHON_PREFIX}_FIND_VERSIONS ${_${_PYTHON_PREFIX}_VERSION})
+ endif()
+ endforeach()
+ endif()
+ endif()
+endif()
+
+# Set ABIs to search
+## default: search any ABI
+if (_${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR VERSION_LESS "3")
+ # ABI not supported
+ unset (_${_PYTHON_PREFIX}_FIND_ABI)
+ set (_${_PYTHON_PREFIX}_ABIFLAGS "")
+else()
+ unset (_${_PYTHON_PREFIX}_FIND_ABI)
+ unset (_${_PYTHON_PREFIX}_ABIFLAGS)
+ if (DEFINED ${_PYTHON_PREFIX}_FIND_ABI)
+ # normalization
+ string (TOUPPER "${${_PYTHON_PREFIX}_FIND_ABI}" _${_PYTHON_PREFIX}_FIND_ABI)
+ list (TRANSFORM _${_PYTHON_PREFIX}_FIND_ABI REPLACE "^(TRUE|Y(ES)?|1)$" "ON")
+ list (TRANSFORM _${_PYTHON_PREFIX}_FIND_ABI REPLACE "^(FALSE|N(O)?|0)$" "OFF")
+ if (NOT _${_PYTHON_PREFIX}_FIND_ABI MATCHES "^(ON|OFF|ANY);(ON|OFF|ANY);(ON|OFF|ANY)$")
+ message (AUTHOR_WARNING "Find${_PYTHON_PREFIX}: ${${_PYTHON_PREFIX}_FIND_ABI}: invalid value for '${_PYTHON_PREFIX}_FIND_ABI'. Ignore it")
+ unset (_${_PYTHON_PREFIX}_FIND_ABI)
+ endif()
+ _python_get_abiflags (_${_PYTHON_PREFIX}_ABIFLAGS)
+ endif()
+endif()
+unset (${_PYTHON_PREFIX}_SOABI)
+
+# Define lookup strategy
+if(POLICY CMP0094)
+ cmake_policy (GET CMP0094 _${_PYTHON_PREFIX}_LOOKUP_POLICY)
+else()
+ set (_${_PYTHON_PREFIX}_LOOKUP_POLICY "OLD")
+endif()
+if (_${_PYTHON_PREFIX}_LOOKUP_POLICY STREQUAL "NEW")
+ set (_${_PYTHON_PREFIX}_FIND_STRATEGY "LOCATION")
+else()
+ set (_${_PYTHON_PREFIX}_FIND_STRATEGY "VERSION")
+endif()
+if (DEFINED ${_PYTHON_PREFIX}_FIND_STRATEGY)
+ if (NOT ${_PYTHON_PREFIX}_FIND_STRATEGY MATCHES "^(VERSION|LOCATION)$")
+ message (AUTHOR_WARNING "Find${_PYTHON_PREFIX}: ${${_PYTHON_PREFIX}_FIND_STRATEGY}: invalid value for '${_PYTHON_PREFIX}_FIND_STRATEGY'. 'VERSION' or 'LOCATION' expected.")
+ set (_${_PYTHON_PREFIX}_FIND_STRATEGY "VERSION")
+ else()
+ set (_${_PYTHON_PREFIX}_FIND_STRATEGY "${${_PYTHON_PREFIX}_FIND_STRATEGY}")
+ endif()
+endif()
+
+# Python and Anaconda distributions: define which architectures can be used
+if (CMAKE_SIZEOF_VOID_P)
+ # In this case, search only for 64bit or 32bit
+ math (EXPR _${_PYTHON_PREFIX}_ARCH "${CMAKE_SIZEOF_VOID_P} * 8")
+ set (_${_PYTHON_PREFIX}_ARCH2 ${_${_PYTHON_PREFIX}_ARCH})
+else()
+ # architecture unknown, search for both 64bit and 32bit
+ set (_${_PYTHON_PREFIX}_ARCH 64)
+ set (_${_PYTHON_PREFIX}_ARCH2 32)
+endif()
+
+# IronPython support
+unset (_${_PYTHON_PREFIX}_IRON_PYTHON_INTERPRETER_NAMES)
+unset (_${_PYTHON_PREFIX}_IRON_PYTHON_COMPILER_NAMES)
+unset (_${_PYTHON_PREFIX}_IRON_PYTHON_COMPILER_ARCH_FLAGS)
+if (CMAKE_SIZEOF_VOID_P)
+ if (_${_PYTHON_PREFIX}_ARCH EQUAL "32")
+ set (_${_PYTHON_PREFIX}_IRON_PYTHON_COMPILER_ARCH_FLAGS "/platform:x86")
+ else()
+ set (_${_PYTHON_PREFIX}_IRON_PYTHON_COMPILER_ARCH_FLAGS "/platform:x64")
+ endif()
+endif()
+if (NOT CMAKE_SYSTEM_NAME STREQUAL "Linux")
+ # Do not use wrapper script on Linux because it is buggy: -c interpreter option cannot be used
+ list (APPEND _${_PYTHON_PREFIX}_IRON_PYTHON_INTERPRETER_NAMES "ipy${_${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR}" "ipy64" "ipy32" "ipy")
+ list (APPEND _${_PYTHON_PREFIX}_IRON_PYTHON_COMPILER_NAMES "ipyc")
+endif()
+list (APPEND _${_PYTHON_PREFIX}_IRON_PYTHON_INTERPRETER_NAMES "ipy.exe")
+list (APPEND _${_PYTHON_PREFIX}_IRON_PYTHON_COMPILER_NAMES "ipyc.exe")
+set (_${_PYTHON_PREFIX}_IRON_PYTHON_PATH_SUFFIXES net45 net40 bin)
+
+# PyPy support
+if (_${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR EQUAL "3")
+ set (_${_PYTHON_PREFIX}_PYPY_NAMES pypy3)
+ set (_${_PYTHON_PREFIX}_PYPY_LIB_NAMES pypy3-c)
+ if (WIN32)
+ # special name for runtime part
+ list (APPEND _${_PYTHON_PREFIX}_PYPY_LIB_NAMES libpypy3-c)
+ endif()
+ set (_${_PYTHON_PREFIX}_PYPY_INCLUDE_PATH_SUFFIXES lib/pypy3)
+else()
+ set (_${_PYTHON_PREFIX}_PYPY_NAMES pypy)
+ set (_${_PYTHON_PREFIX}_PYPY_LIB_NAMES pypy-c)
+ if (WIN32)
+ # special name for runtime part
+ list (APPEND _${_PYTHON_PREFIX}_PYPY_LIB_NAMES libpypy-c)
+ endif()
+ set (_${_PYTHON_PREFIX}_PYPY_INCLUDE_PATH_SUFFIXES lib/pypy)
+endif()
+set (_${_PYTHON_PREFIX}_PYPY_EXECUTABLE_PATH_SUFFIXES bin)
+set (_${_PYTHON_PREFIX}_PYPY_LIBRARY_PATH_SUFFIXES lib libs bin)
+list (APPEND _${_PYTHON_PREFIX}_PYPY_INCLUDE_PATH_SUFFIXES include)
+
+# Python Implementations handling
+unset (_${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS)
+if (DEFINED ${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS)
+ foreach (_${_PYTHON_PREFIX}_IMPLEMENTATION IN LISTS ${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS)
+ if (NOT _${_PYTHON_PREFIX}_IMPLEMENTATION MATCHES "^(CPython|IronPython|PyPy)$")
+ message (AUTHOR_WARNING "Find${_PYTHON_PREFIX}: ${_${_PYTHON_PREFIX}_IMPLEMENTATION}: invalid value for '${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS'. 'CPython', 'IronPython' or 'PyPy' expected. Value will be ignored.")
+ else()
+ list (APPEND _${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS ${_${_PYTHON_PREFIX}_IMPLEMENTATION})
+ endif()
+ endforeach()
+else()
+ if (WIN32)
+ set (_${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS CPython IronPython)
+ else()
+ set (_${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS CPython)
+ endif()
+endif()
+
+# compute list of names for header file
+unset (_${_PYTHON_PREFIX}_INCLUDE_NAMES)
+foreach (_${_PYTHON_PREFIX}_IMPLEMENTATION IN LISTS _${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS)
+ if (_${_PYTHON_PREFIX}_IMPLEMENTATION STREQUAL "CPython")
+ list (APPEND _${_PYTHON_PREFIX}_INCLUDE_NAMES "Python.h")
+ elseif (_${_PYTHON_PREFIX}_IMPLEMENTATION STREQUAL "PyPy")
+ list (APPEND _${_PYTHON_PREFIX}_INCLUDE_NAMES "PyPy.h")
+ endif()
+endforeach()
+
+
+# Apple frameworks handling
+_python_find_frameworks ()
+
+set (_${_PYTHON_PREFIX}_FIND_FRAMEWORK "FIRST")
+
+if (DEFINED ${_PYTHON_PREFIX}_FIND_FRAMEWORK)
+ if (NOT ${_PYTHON_PREFIX}_FIND_FRAMEWORK MATCHES "^(FIRST|LAST|NEVER)$")
+ message (AUTHOR_WARNING "Find${_PYTHON_PREFIX}: ${${_PYTHON_PREFIX}_FIND_FRAMEWORK}: invalid value for '${_PYTHON_PREFIX}_FIND_FRAMEWORK'. 'FIRST', 'LAST' or 'NEVER' expected. 'FIRST' will be used instead.")
+ else()
+ set (_${_PYTHON_PREFIX}_FIND_FRAMEWORK ${${_PYTHON_PREFIX}_FIND_FRAMEWORK})
+ endif()
+elseif (DEFINED CMAKE_FIND_FRAMEWORK)
+ if (CMAKE_FIND_FRAMEWORK STREQUAL "ONLY")
+ message (AUTHOR_WARNING "Find${_PYTHON_PREFIX}: CMAKE_FIND_FRAMEWORK: 'ONLY' value is not supported. 'FIRST' will be used instead.")
+ elseif (NOT CMAKE_FIND_FRAMEWORK MATCHES "^(FIRST|LAST|NEVER)$")
+ message (AUTHOR_WARNING "Find${_PYTHON_PREFIX}: ${CMAKE_FIND_FRAMEWORK}: invalid value for 'CMAKE_FIND_FRAMEWORK'. 'FIRST', 'LAST' or 'NEVER' expected. 'FIRST' will be used instead.")
+ else()
+ set (_${_PYTHON_PREFIX}_FIND_FRAMEWORK ${CMAKE_FIND_FRAMEWORK})
+ endif()
+endif()
+
+# Save CMAKE_FIND_APPBUNDLE
+if (DEFINED CMAKE_FIND_APPBUNDLE)
+ set (_${_PYTHON_PREFIX}_CMAKE_FIND_APPBUNDLE ${CMAKE_FIND_APPBUNDLE})
+else()
+ unset (_${_PYTHON_PREFIX}_CMAKE_FIND_APPBUNDLE)
+endif()
+# To avoid app bundle lookup
+set (CMAKE_FIND_APPBUNDLE "NEVER")
+
+# Save CMAKE_FIND_FRAMEWORK
+if (DEFINED CMAKE_FIND_FRAMEWORK)
+ set (_${_PYTHON_PREFIX}_CMAKE_FIND_FRAMEWORK ${CMAKE_FIND_FRAMEWORK})
+else()
+ unset (_${_PYTHON_PREFIX}_CMAKE_FIND_FRAMEWORK)
+endif()
+# To avoid framework lookup
+set (CMAKE_FIND_FRAMEWORK "NEVER")
+
+# Windows Registry handling
+if (DEFINED ${_PYTHON_PREFIX}_FIND_REGISTRY)
+ if (NOT ${_PYTHON_PREFIX}_FIND_REGISTRY MATCHES "^(FIRST|LAST|NEVER)$")
+ message (AUTHOR_WARNING "Find${_PYTHON_PREFIX}: ${${_PYTHON_PREFIX}_FIND_REGISTRY}: invalid value for '${_PYTHON_PREFIX}_FIND_REGISTRY'. 'FIRST', 'LAST' or 'NEVER' expected. 'FIRST' will be used instead.")
+ set (_${_PYTHON_PREFIX}_FIND_REGISTRY "FIRST")
+ else()
+ set (_${_PYTHON_PREFIX}_FIND_REGISTRY ${${_PYTHON_PREFIX}_FIND_REGISTRY})
+ endif()
+else()
+ set (_${_PYTHON_PREFIX}_FIND_REGISTRY "FIRST")
+endif()
+
+# virtual environments recognition
+if (DEFINED ENV{VIRTUAL_ENV} OR DEFINED ENV{CONDA_PREFIX})
+ if (DEFINED ${_PYTHON_PREFIX}_FIND_VIRTUALENV)
+ if (NOT ${_PYTHON_PREFIX}_FIND_VIRTUALENV MATCHES "^(FIRST|ONLY|STANDARD)$")
+ message (AUTHOR_WARNING "Find${_PYTHON_PREFIX}: ${${_PYTHON_PREFIX}_FIND_VIRTUALENV}: invalid value for '${_PYTHON_PREFIX}_FIND_VIRTUALENV'. 'FIRST', 'ONLY' or 'STANDARD' expected. 'FIRST' will be used instead.")
+ set (_${_PYTHON_PREFIX}_FIND_VIRTUALENV "FIRST")
+ else()
+ set (_${_PYTHON_PREFIX}_FIND_VIRTUALENV ${${_PYTHON_PREFIX}_FIND_VIRTUALENV})
+ endif()
+ else()
+ set (_${_PYTHON_PREFIX}_FIND_VIRTUALENV FIRST)
+ endif()
+else()
+ set (_${_PYTHON_PREFIX}_FIND_VIRTUALENV STANDARD)
+endif()
+
+
+# Python naming handling
+if (DEFINED ${_PYTHON_PREFIX}_FIND_UNVERSIONED_NAMES)
+ if (NOT ${_PYTHON_PREFIX}_FIND_UNVERSIONED_NAMES MATCHES "^(FIRST|LAST|NEVER)$")
+ message (AUTHOR_WARNING "Find${_PYTHON_PREFIX}: ${_${_PYTHON_PREFIX}_FIND_UNVERSIONED_NAMES}: invalid value for '${_PYTHON_PREFIX}_FIND_UNVERSIONED_NAMES'. 'FIRST', 'LAST' or 'NEVER' expected. 'LAST' will be used instead.")
+ set (_${_PYTHON_PREFIX}_FIND_UNVERSIONED_NAMES LAST)
+ else()
+ set (_${_PYTHON_PREFIX}_FIND_UNVERSIONED_NAMES ${${_PYTHON_PREFIX}_FIND_UNVERSIONED_NAMES})
+ endif()
+else()
+ set (_${_PYTHON_PREFIX}_FIND_UNVERSIONED_NAMES LAST)
+endif()
+
+
+# Compute search signature
+# This signature will be used to check validity of cached variables on new search
+set (_${_PYTHON_PREFIX}_SIGNATURE "${${_PYTHON_PREFIX}_ROOT_DIR}:${_${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS}:${_${_PYTHON_PREFIX}_FIND_STRATEGY}:${${_PYTHON_PREFIX}_FIND_VIRTUALENV}${_${_PYTHON_PREFIX}_FIND_UNVERSIONED_NAMES}")
+if (NOT WIN32)
+ string (APPEND _${_PYTHON_PREFIX}_SIGNATURE ":${${_PYTHON_PREFIX}_USE_STATIC_LIBS}:")
+endif()
+if (CMAKE_HOST_APPLE)
+ string (APPEND _${_PYTHON_PREFIX}_SIGNATURE ":${_${_PYTHON_PREFIX}_FIND_FRAMEWORK}")
+endif()
+if (CMAKE_HOST_WIN32)
+ string (APPEND _${_PYTHON_PREFIX}_SIGNATURE ":${_${_PYTHON_PREFIX}_FIND_REGISTRY}")
+endif()
+
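+# Validate the cached development artifacts (library and include dir) of the
+# given component against the current search signature; stale cache entries are
+# unset so they get re-discovered below.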
+function (_PYTHON_CHECK_DEVELOPMENT_SIGNATURE module)
+ if ("Development.${module}" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS)
+ string (TOUPPER "${module}" id)
+ set (signature "${_${_PYTHON_PREFIX}_SIGNATURE}:")
+ if ("LIBRARY" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_${id}_ARTIFACTS)
+ list (APPEND signature "${_${_PYTHON_PREFIX}_LIBRARY_RELEASE}:")
+ endif()
+ if ("INCLUDE_DIR" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_${id}_ARTIFACTS)
+ list (APPEND signature "${_${_PYTHON_PREFIX}_INCLUDE_DIR}:")
+ endif()
+ string (MD5 signature "${signature}")
+ if (signature STREQUAL _${_PYTHON_PREFIX}_DEVELOPMENT_${id}_SIGNATURE)
+ if ("LIBRARY" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_${id}_ARTIFACTS)
+ if (${_PYTHON_PREFIX}_FIND_VERSION_EXACT)
+ _python_validate_library (VERSION ${${_PYTHON_PREFIX}_FIND_VERSION} EXACT CHECK_EXISTS)
+ elseif (${_PYTHON_PREFIX}_FIND_VERSION_RANGE)
+ _python_validate_library (IN_RANGE CHECK_EXISTS)
+ elseif (DEFINED ${_PYTHON_PREFIX}_FIND_VERSION)
+ _python_validate_library (VERSION ${${_PYTHON_PREFIX}_FIND_VERSION} CHECK_EXISTS)
+ else()
+ _python_validate_library (CHECK_EXISTS)
+ endif()
+ endif()
+ if ("INCLUDE_DIR" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_${id}_ARTIFACTS)
+ if (${_PYTHON_PREFIX}_FIND_VERSION_EXACT)
+ _python_validate_include_dir (VERSION ${${_PYTHON_PREFIX}_FIND_VERSION} EXACT CHECK_EXISTS)
+ elseif (${_PYTHON_PREFIX}_FIND_VERSION_RANGE)
+ _python_validate_include_dir (IN_RANGE CHECK_EXISTS)
+ elseif (${_PYTHON_PREFIX}_FIND_VERSION)
+ _python_validate_include_dir (VERSION ${${_PYTHON_PREFIX}_FIND_VERSION} CHECK_EXISTS)
+ else()
+ _python_validate_include_dir (CHECK_EXISTS)
+ endif()
+ endif()
+ else()
+ if ("LIBRARY" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_${id}_ARTIFACTS)
+ unset (_${_PYTHON_PREFIX}_LIBRARY_RELEASE CACHE)
+ unset (_${_PYTHON_PREFIX}_LIBRARY_DEBUG CACHE)
+ endif()
+ if ("INCLUDE_DIR" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_${id}_ARTIFACTS)
+ unset (_${_PYTHON_PREFIX}_INCLUDE_DIR CACHE)
+ endif()
+ endif()
+ if (("LIBRARY" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_${id}_ARTIFACTS
+ AND NOT _${_PYTHON_PREFIX}_LIBRARY_RELEASE)
+ OR ("INCLUDE_DIR" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_${id}_ARTIFACTS
+ AND NOT _${_PYTHON_PREFIX}_INCLUDE_DIR))
+ unset (_${_PYTHON_PREFIX}_CONFIG CACHE)
+ unset (_${_PYTHON_PREFIX}_DEVELOPMENT_${id}_SIGNATURE CACHE)
+ endif()
+ endif()
+endfunction()
+
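+# Store the search signature of the development artifacts that were just found,
+# so a later run can detect whether the cached values are still applicable.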
+function (_PYTHON_COMPUTE_DEVELOPMENT_SIGNATURE module)
+ string (TOUPPER "${module}" id)
+ if (${_PYTHON_PREFIX}_Development.${module}_FOUND)
+ set (signature "${_${_PYTHON_PREFIX}_SIGNATURE}:")
+ if ("LIBRARY" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_${id}_ARTIFACTS)
+ list (APPEND signature "${_${_PYTHON_PREFIX}_LIBRARY_RELEASE}:")
+ endif()
+ if ("INCLUDE_DIR" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_${id}_ARTIFACTS)
+ list (APPEND signature "${_${_PYTHON_PREFIX}_INCLUDE_DIR}:")
+ endif()
+ string (MD5 signature "${signature}")
+ set (_${_PYTHON_PREFIX}_DEVELOPMENT_${id}_SIGNATURE "${signature}" CACHE INTERNAL "")
+ else()
+ unset (_${_PYTHON_PREFIX}_DEVELOPMENT_${id}_SIGNATURE CACHE)
+ endif()
+endfunction()
+
+
+unset (_${_PYTHON_PREFIX}_REQUIRED_VARS)
+unset (_${_PYTHON_PREFIX}_CACHED_VARS)
+unset (_${_PYTHON_PREFIX}_Interpreter_REASON_FAILURE)
+unset (_${_PYTHON_PREFIX}_Compiler_REASON_FAILURE)
+unset (_${_PYTHON_PREFIX}_Development_REASON_FAILURE)
+unset (_${_PYTHON_PREFIX}_NumPy_REASON_FAILURE)
+
+
+# preamble
+## For IronPython on platforms other than Windows, search for the .Net interpreter
+if ("IronPython" IN_LIST _${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS
+ AND NOT WIN32)
+ find_program (${_PYTHON_PREFIX}_DOTNET_LAUNCHER
+ NAMES "mono")
+endif()
+
+
+# first step, search for the interpreter
+if ("Interpreter" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS)
+ list (APPEND _${_PYTHON_PREFIX}_CACHED_VARS _${_PYTHON_PREFIX}_EXECUTABLE
+ _${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES)
+ if (${_PYTHON_PREFIX}_FIND_REQUIRED_Interpreter)
+ list (APPEND _${_PYTHON_PREFIX}_REQUIRED_VARS ${_PYTHON_PREFIX}_EXECUTABLE)
+ endif()
+
+ if (DEFINED ${_PYTHON_PREFIX}_EXECUTABLE
+ AND IS_ABSOLUTE "${${_PYTHON_PREFIX}_EXECUTABLE}")
+ if (NOT ${_PYTHON_PREFIX}_EXECUTABLE STREQUAL _${_PYTHON_PREFIX}_EXECUTABLE)
+ # invalidate cache properties
+ unset (_${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES CACHE)
+ endif()
+ set (_${_PYTHON_PREFIX}_EXECUTABLE "${${_PYTHON_PREFIX}_EXECUTABLE}" CACHE INTERNAL "")
+ elseif (DEFINED _${_PYTHON_PREFIX}_EXECUTABLE)
+ # compute interpreter signature and check validity of definition
+ string (MD5 __${_PYTHON_PREFIX}_INTERPRETER_SIGNATURE "${_${_PYTHON_PREFIX}_SIGNATURE}:${_${_PYTHON_PREFIX}_EXECUTABLE}")
+ if (__${_PYTHON_PREFIX}_INTERPRETER_SIGNATURE STREQUAL _${_PYTHON_PREFIX}_INTERPRETER_SIGNATURE)
+ # check version validity
+ if (${_PYTHON_PREFIX}_FIND_VERSION_EXACT)
+ _python_validate_interpreter (VERSION ${${_PYTHON_PREFIX}_FIND_VERSION} EXACT CHECK_EXISTS)
+ elseif (${_PYTHON_PREFIX}_FIND_VERSION_RANGE)
+ _python_validate_interpreter (IN_RANGE CHECK_EXISTS)
+ elseif (DEFINED ${_PYTHON_PREFIX}_FIND_VERSION)
+ _python_validate_interpreter (VERSION ${${_PYTHON_PREFIX}_FIND_VERSION} CHECK_EXISTS)
+ else()
+ _python_validate_interpreter (CHECK_EXISTS)
+ endif()
+ else()
+ unset (_${_PYTHON_PREFIX}_EXECUTABLE CACHE)
+ endif()
+ if (NOT _${_PYTHON_PREFIX}_EXECUTABLE)
+ unset (_${_PYTHON_PREFIX}_INTERPRETER_SIGNATURE CACHE)
+ unset (_${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES CACHE)
+ endif()
+ endif()
+
+ if (NOT _${_PYTHON_PREFIX}_EXECUTABLE)
+ set (_${_PYTHON_PREFIX}_HINTS "${${_PYTHON_PREFIX}_ROOT_DIR}" ENV ${_PYTHON_PREFIX}_ROOT_DIR)
+
+ if (_${_PYTHON_PREFIX}_FIND_STRATEGY STREQUAL "LOCATION")
+ # build all executable names
+ _python_get_names (_${_PYTHON_PREFIX}_NAMES VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS} POSIX INTERPRETER)
+ _python_get_path_suffixes (_${_PYTHON_PREFIX}_PATH_SUFFIXES VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS} INTERPRETER)
+
+ # Framework Paths
+ _python_get_frameworks (_${_PYTHON_PREFIX}_FRAMEWORK_PATHS VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS})
+ # Registry Paths
+ _python_get_registries (_${_PYTHON_PREFIX}_REGISTRY_PATHS VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS})
+
+ set (_${_PYTHON_PREFIX}_VALIDATE_OPTIONS ${_${_PYTHON_PREFIX}_FIND_VERSION_EXACT})
+ if (${_PYTHON_PREFIX}_FIND_VERSION_RANGE)
+ list (APPEND _${_PYTHON_PREFIX}_VALIDATE_OPTIONS IN_RANGE)
+ elseif (DEFINED ${_PYTHON_PREFIX}_FIND_VERSION)
+ list (APPEND _${_PYTHON_PREFIX}_VALIDATE_OPTIONS VERSION ${${_PYTHON_PREFIX}_FIND_VERSION})
+ endif()
+
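+ # single-pass loop: each successful find_program breaks out early; the order of
+ # the lookups below implements the FIRST/LAST/NEVER preferences for virtual
+ # environments, Apple frameworks and the Windows registry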
+ while (TRUE)
+ # Virtual environments handling
+ if (_${_PYTHON_PREFIX}_FIND_VIRTUALENV MATCHES "^(FIRST|ONLY)$")
+ find_program (_${_PYTHON_PREFIX}_EXECUTABLE
+ NAMES ${_${_PYTHON_PREFIX}_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ENV VIRTUAL_ENV ENV CONDA_PREFIX
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_CMAKE_PATH
+ NO_CMAKE_ENVIRONMENT_PATH
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+
+ _python_validate_interpreter (${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_EXECUTABLE)
+ break()
+ endif()
+ if (_${_PYTHON_PREFIX}_FIND_VIRTUALENV STREQUAL "ONLY")
+ break()
+ endif()
+ endif()
+
+ # Apple frameworks handling
+ if (CMAKE_HOST_APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "FIRST")
+ find_program (_${_PYTHON_PREFIX}_EXECUTABLE
+ NAMES ${_${_PYTHON_PREFIX}_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_CMAKE_PATH
+ NO_CMAKE_ENVIRONMENT_PATH
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ _python_validate_interpreter (${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_EXECUTABLE)
+ break()
+ endif()
+ endif()
+ # Windows registry
+ if (CMAKE_HOST_WIN32 AND _${_PYTHON_PREFIX}_FIND_REGISTRY STREQUAL "FIRST")
+ find_program (_${_PYTHON_PREFIX}_EXECUTABLE
+ NAMES ${_${_PYTHON_PREFIX}_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_REGISTRY_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ _python_validate_interpreter (${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_EXECUTABLE)
+ break()
+ endif()
+ endif()
+
+ # try using HINTS
+ find_program (_${_PYTHON_PREFIX}_EXECUTABLE
+ NAMES ${_${_PYTHON_PREFIX}_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ _python_validate_interpreter (${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_EXECUTABLE)
+ break()
+ endif()
+ # try using standard paths
+ find_program (_${_PYTHON_PREFIX}_EXECUTABLE
+ NAMES ${_${_PYTHON_PREFIX}_NAMES}
+ NAMES_PER_DIR
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES})
+ _python_validate_interpreter (${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_EXECUTABLE)
+ break()
+ endif()
+
+ # Apple frameworks handling
+ if (CMAKE_HOST_APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "LAST")
+ find_program (_${_PYTHON_PREFIX}_EXECUTABLE
+ NAMES ${_${_PYTHON_PREFIX}_NAMES}
+ NAMES_PER_DIR
+ PATHS ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_DEFAULT_PATH)
+ _python_validate_interpreter (${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_EXECUTABLE)
+ break()
+ endif()
+ endif()
+ # Windows registry
+ if (CMAKE_HOST_WIN32 AND _${_PYTHON_PREFIX}_FIND_REGISTRY STREQUAL "LAST")
+ find_program (_${_PYTHON_PREFIX}_EXECUTABLE
+ NAMES ${_${_PYTHON_PREFIX}_NAMES}
+ NAMES_PER_DIR
+ PATHS ${_${_PYTHON_PREFIX}_REGISTRY_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_DEFAULT_PATH)
+ _python_validate_interpreter (${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_EXECUTABLE)
+ break()
+ endif()
+ endif()
+
+ break()
+ endwhile()
+ else()
+ # look up various versions and locations
+ set (_${_PYTHON_PREFIX}_VALIDATE_OPTIONS EXACT)
+ if (${_PYTHON_PREFIX}_FIND_VERSION_RANGE)
+ list (APPEND _${_PYTHON_PREFIX}_VALIDATE_OPTIONS IN_RANGE)
+ endif()
+
+ foreach (_${_PYTHON_PREFIX}_VERSION IN LISTS _${_PYTHON_PREFIX}_FIND_VERSIONS)
+ _python_get_names (_${_PYTHON_PREFIX}_NAMES VERSION ${_${_PYTHON_PREFIX}_VERSION} POSIX INTERPRETER)
+ _python_get_path_suffixes (_${_PYTHON_PREFIX}_PATH_SUFFIXES VERSION ${_${_PYTHON_PREFIX}_VERSION} INTERPRETER)
+
+ _python_get_frameworks (_${_PYTHON_PREFIX}_FRAMEWORK_PATHS VERSION ${_${_PYTHON_PREFIX}_VERSION})
+ _python_get_registries (_${_PYTHON_PREFIX}_REGISTRY_PATHS VERSION ${_${_PYTHON_PREFIX}_VERSION})
+
+ # Virtual environments handling
+ if (_${_PYTHON_PREFIX}_FIND_VIRTUALENV MATCHES "^(FIRST|ONLY)$")
+ find_program (_${_PYTHON_PREFIX}_EXECUTABLE
+ NAMES ${_${_PYTHON_PREFIX}_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ENV VIRTUAL_ENV ENV CONDA_PREFIX
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_CMAKE_PATH
+ NO_CMAKE_ENVIRONMENT_PATH
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ _python_validate_interpreter (VERSION ${_${_PYTHON_PREFIX}_VERSION} ${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_EXECUTABLE)
+ break()
+ endif()
+ if (_${_PYTHON_PREFIX}_FIND_VIRTUALENV STREQUAL "ONLY")
+ continue()
+ endif()
+ endif()
+
+ # Apple frameworks handling
+ if (CMAKE_HOST_APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "FIRST")
+ find_program (_${_PYTHON_PREFIX}_EXECUTABLE
+ NAMES ${_${_PYTHON_PREFIX}_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_CMAKE_PATH
+ NO_CMAKE_ENVIRONMENT_PATH
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ endif()
+
+ # Windows registry
+ if (CMAKE_HOST_WIN32 AND _${_PYTHON_PREFIX}_FIND_REGISTRY STREQUAL "FIRST")
+ find_program (_${_PYTHON_PREFIX}_EXECUTABLE
+ NAMES ${_${_PYTHON_PREFIX}_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_REGISTRY_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ endif()
+
+ _python_validate_interpreter (VERSION ${_${_PYTHON_PREFIX}_VERSION} ${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_EXECUTABLE)
+ break()
+ endif()
+
+ # try using HINTS
+ find_program (_${_PYTHON_PREFIX}_EXECUTABLE
+ NAMES ${_${_PYTHON_PREFIX}_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ _python_validate_interpreter (VERSION ${_${_PYTHON_PREFIX}_VERSION} ${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_EXECUTABLE)
+ break()
+ endif()
+ # try using standard paths.
+ # NAMES_PER_DIR is intentionally omitted to improve the chance of finding
+ # the expected version.
+ # For example, typical systems provide 'python' for version 2.* and 'python3'
+ # for version 3.*, so searching names per directory would systematically pick
+ # 'python' (i.e. version 2) even when version 3 is requested.
+ find_program (_${_PYTHON_PREFIX}_EXECUTABLE
+ NAMES ${_${_PYTHON_PREFIX}_NAMES}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES})
+ _python_validate_interpreter (VERSION ${_${_PYTHON_PREFIX}_VERSION} ${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_EXECUTABLE)
+ break()
+ endif()
+
+ # Apple frameworks handling
+ if (CMAKE_HOST_APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "LAST")
+ find_program (_${_PYTHON_PREFIX}_EXECUTABLE
+ NAMES ${_${_PYTHON_PREFIX}_NAMES}
+ NAMES_PER_DIR
+ PATHS ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_DEFAULT_PATH)
+ endif()
+
+ # Windows registry
+ if (CMAKE_HOST_WIN32 AND _${_PYTHON_PREFIX}_FIND_REGISTRY STREQUAL "LAST")
+ find_program (_${_PYTHON_PREFIX}_EXECUTABLE
+ NAMES ${_${_PYTHON_PREFIX}_NAMES}
+ NAMES_PER_DIR
+ PATHS ${_${_PYTHON_PREFIX}_REGISTRY_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_DEFAULT_PATH)
+ endif()
+
+ _python_validate_interpreter (VERSION ${_${_PYTHON_PREFIX}_VERSION} ${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_EXECUTABLE)
+ break()
+ endif()
+ endforeach()
+
+ if (NOT _${_PYTHON_PREFIX}_EXECUTABLE AND
+ NOT _${_PYTHON_PREFIX}_FIND_VIRTUALENV STREQUAL "ONLY")
+ # No specific version found. Retry with generic names and standard paths.
+ # NAMES_PER_DIR is intentionally omitted to improve the chance of finding
+ # the expected version.
+ # For example, typical systems provide 'python' for version 2.* and 'python3'
+ # for version 3.*, so searching names per directory would systematically pick
+ # 'python' (i.e. version 2) even when version 3 is requested.
+ _python_get_names (_${_PYTHON_PREFIX}_NAMES POSIX INTERPRETER)
+ find_program (_${_PYTHON_PREFIX}_EXECUTABLE
+ NAMES ${_${_PYTHON_PREFIX}_NAMES})
+ _python_validate_interpreter ()
+ endif()
+ endif()
+ endif()
+
+ set (${_PYTHON_PREFIX}_EXECUTABLE "${_${_PYTHON_PREFIX}_EXECUTABLE}")
+ _python_get_launcher (_${_PYTHON_PREFIX}_INTERPRETER_LAUNCHER INTERPRETER)
+
+ # retrieve exact version of executable found
+ if (_${_PYTHON_PREFIX}_EXECUTABLE)
+ execute_process (COMMAND ${_${_PYTHON_PREFIX}_INTERPRETER_LAUNCHER} "${_${_PYTHON_PREFIX}_EXECUTABLE}" -c
+ "import sys; sys.stdout.write('.'.join([str(x) for x in sys.version_info[:3]]))"
+ RESULT_VARIABLE _${_PYTHON_PREFIX}_RESULT
+ OUTPUT_VARIABLE ${_PYTHON_PREFIX}_VERSION
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (NOT _${_PYTHON_PREFIX}_RESULT)
+ set (_${_PYTHON_PREFIX}_EXECUTABLE_USABLE TRUE)
+ else()
+ # Interpreter is not usable
+ set (_${_PYTHON_PREFIX}_EXECUTABLE_USABLE FALSE)
+ unset (${_PYTHON_PREFIX}_VERSION)
+ set (_${_PYTHON_PREFIX}_Interpreter_REASON_FAILURE "Cannot run the interpreter \"${_${_PYTHON_PREFIX}_EXECUTABLE}\"")
+ endif()
+ endif()
+
+ if (_${_PYTHON_PREFIX}_EXECUTABLE AND _${_PYTHON_PREFIX}_EXECUTABLE_USABLE)
+ if (_${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES)
+ set (${_PYTHON_PREFIX}_Interpreter_FOUND TRUE)
+
+ list (GET _${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES 0 ${_PYTHON_PREFIX}_INTERPRETER_ID)
+
+ list (GET _${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES 1 ${_PYTHON_PREFIX}_VERSION_MAJOR)
+ list (GET _${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES 2 ${_PYTHON_PREFIX}_VERSION_MINOR)
+ list (GET _${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES 3 ${_PYTHON_PREFIX}_VERSION_PATCH)
+
+ list (GET _${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES 4 _${_PYTHON_PREFIX}_ARCH)
+ set (_${_PYTHON_PREFIX}_ARCH2 ${_${_PYTHON_PREFIX}_ARCH})
+
+ list (GET _${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES 5 _${_PYTHON_PREFIX}_ABIFLAGS)
+ list (GET _${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES 6 ${_PYTHON_PREFIX}_SOABI)
+
+ list (GET _${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES 7 ${_PYTHON_PREFIX}_STDLIB)
+ list (GET _${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES 8 ${_PYTHON_PREFIX}_STDARCH)
+ list (GET _${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES 9 ${_PYTHON_PREFIX}_SITELIB)
+ list (GET _${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES 10 ${_PYTHON_PREFIX}_SITEARCH)
+ else()
+ string (REGEX MATCHALL "[0-9]+" _${_PYTHON_PREFIX}_VERSIONS "${${_PYTHON_PREFIX}_VERSION}")
+ list (GET _${_PYTHON_PREFIX}_VERSIONS 0 ${_PYTHON_PREFIX}_VERSION_MAJOR)
+ list (GET _${_PYTHON_PREFIX}_VERSIONS 1 ${_PYTHON_PREFIX}_VERSION_MINOR)
+ list (GET _${_PYTHON_PREFIX}_VERSIONS 2 ${_PYTHON_PREFIX}_VERSION_PATCH)
+
+ if (${_PYTHON_PREFIX}_VERSION_MAJOR VERSION_EQUAL _${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR)
+ set (${_PYTHON_PREFIX}_Interpreter_FOUND TRUE)
+
+ # Use interpreter version and ABI for future searches to ensure consistency
+ set (_${_PYTHON_PREFIX}_FIND_VERSIONS ${${_PYTHON_PREFIX}_VERSION_MAJOR}.${${_PYTHON_PREFIX}_VERSION_MINOR})
+ execute_process (COMMAND ${_${_PYTHON_PREFIX}_INTERPRETER_LAUNCHER} "${_${_PYTHON_PREFIX}_EXECUTABLE}" -c
+ "import sys; sys.stdout.write(sys.abiflags)"
+ RESULT_VARIABLE _${_PYTHON_PREFIX}_RESULT
+ OUTPUT_VARIABLE _${_PYTHON_PREFIX}_ABIFLAGS
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (_${_PYTHON_PREFIX}_RESULT)
+ # assume ABI is not supported
+ set (_${_PYTHON_PREFIX}_ABIFLAGS "")
+ endif()
+ endif()
+
+ if (${_PYTHON_PREFIX}_Interpreter_FOUND)
+ unset (_${_PYTHON_PREFIX}_Interpreter_REASON_FAILURE)
+
+ # compute and save interpreter signature
+ string (MD5 __${_PYTHON_PREFIX}_INTERPRETER_SIGNATURE "${_${_PYTHON_PREFIX}_SIGNATURE}:${_${_PYTHON_PREFIX}_EXECUTABLE}")
+ set (_${_PYTHON_PREFIX}_INTERPRETER_SIGNATURE "${__${_PYTHON_PREFIX}_INTERPRETER_SIGNATURE}" CACHE INTERNAL "")
+
+ if (NOT CMAKE_SIZEOF_VOID_P)
+ # determine interpreter architecture
+ execute_process (COMMAND ${_${_PYTHON_PREFIX}_INTERPRETER_LAUNCHER} "${_${_PYTHON_PREFIX}_EXECUTABLE}" -c
+ "import sys; sys.stdout.write(str(sys.maxsize > 2**32))"
+ RESULT_VARIABLE _${_PYTHON_PREFIX}_RESULT
+ OUTPUT_VARIABLE ${_PYTHON_PREFIX}_IS64BIT
+ ERROR_VARIABLE ${_PYTHON_PREFIX}_IS64BIT)
+ if (NOT _${_PYTHON_PREFIX}_RESULT)
+ if (${_PYTHON_PREFIX}_IS64BIT)
+ set (_${_PYTHON_PREFIX}_ARCH 64)
+ set (_${_PYTHON_PREFIX}_ARCH2 64)
+ else()
+ set (_${_PYTHON_PREFIX}_ARCH 32)
+ set (_${_PYTHON_PREFIX}_ARCH2 32)
+ endif()
+ endif()
+ endif()
+
+ # retrieve interpreter identity
+ execute_process (COMMAND ${_${_PYTHON_PREFIX}_INTERPRETER_LAUNCHER} "${_${_PYTHON_PREFIX}_EXECUTABLE}" -V
+ RESULT_VARIABLE _${_PYTHON_PREFIX}_RESULT
+ OUTPUT_VARIABLE ${_PYTHON_PREFIX}_INTERPRETER_ID
+ ERROR_VARIABLE ${_PYTHON_PREFIX}_INTERPRETER_ID)
+ if (NOT _${_PYTHON_PREFIX}_RESULT)
+ if (${_PYTHON_PREFIX}_INTERPRETER_ID MATCHES "Anaconda")
+ set (${_PYTHON_PREFIX}_INTERPRETER_ID "Anaconda")
+ elseif (${_PYTHON_PREFIX}_INTERPRETER_ID MATCHES "Enthought")
+ set (${_PYTHON_PREFIX}_INTERPRETER_ID "Canopy")
+ elseif (${_PYTHON_PREFIX}_INTERPRETER_ID MATCHES "PyPy ([0-9.]+)")
+ set (${_PYTHON_PREFIX}_INTERPRETER_ID "PyPy")
+ set (${_PYTHON_PREFIX}_PyPy_VERSION "${CMAKE_MATCH_1}")
+ else()
+ string (REGEX REPLACE "^([^ ]+).*" "\\1" ${_PYTHON_PREFIX}_INTERPRETER_ID "${${_PYTHON_PREFIX}_INTERPRETER_ID}")
+ if (${_PYTHON_PREFIX}_INTERPRETER_ID STREQUAL "Python")
+ # try to get a more precise ID
+ execute_process (COMMAND ${_${_PYTHON_PREFIX}_INTERPRETER_LAUNCHER} "${_${_PYTHON_PREFIX}_EXECUTABLE}" -c
+ "import sys; sys.stdout.write(sys.copyright)"
+ RESULT_VARIABLE _${_PYTHON_PREFIX}_RESULT
+ OUTPUT_VARIABLE ${_PYTHON_PREFIX}_COPYRIGHT
+ ERROR_QUIET)
+ if (${_PYTHON_PREFIX}_COPYRIGHT MATCHES "ActiveState")
+ set (${_PYTHON_PREFIX}_INTERPRETER_ID "ActivePython")
+ endif()
+ endif()
+ endif()
+ else()
+ set (${_PYTHON_PREFIX}_INTERPRETER_ID Python)
+ endif()
+
+ # retrieve various package installation directories
+ execute_process (COMMAND ${_${_PYTHON_PREFIX}_INTERPRETER_LAUNCHER} "${_${_PYTHON_PREFIX}_EXECUTABLE}" -c
+ "import sys\ntry:\n from distutils import sysconfig\n sys.stdout.write(';'.join([sysconfig.get_python_lib(plat_specific=False,standard_lib=True),sysconfig.get_python_lib(plat_specific=True,standard_lib=True),sysconfig.get_python_lib(plat_specific=False,standard_lib=False),sysconfig.get_python_lib(plat_specific=True,standard_lib=False)]))\nexcept Exception:\n import sysconfig\n sys.stdout.write(';'.join([sysconfig.get_path('stdlib'),sysconfig.get_path('platstdlib'),sysconfig.get_path('purelib'),sysconfig.get_path('platlib')]))"
+ RESULT_VARIABLE _${_PYTHON_PREFIX}_RESULT
+ OUTPUT_VARIABLE _${_PYTHON_PREFIX}_LIBPATHS
+ ERROR_QUIET)
+ if (NOT _${_PYTHON_PREFIX}_RESULT)
+ list (GET _${_PYTHON_PREFIX}_LIBPATHS 0 ${_PYTHON_PREFIX}_STDLIB)
+ list (GET _${_PYTHON_PREFIX}_LIBPATHS 1 ${_PYTHON_PREFIX}_STDARCH)
+ list (GET _${_PYTHON_PREFIX}_LIBPATHS 2 ${_PYTHON_PREFIX}_SITELIB)
+ list (GET _${_PYTHON_PREFIX}_LIBPATHS 3 ${_PYTHON_PREFIX}_SITEARCH)
+ else()
+ unset (${_PYTHON_PREFIX}_STDLIB)
+ unset (${_PYTHON_PREFIX}_STDARCH)
+ unset (${_PYTHON_PREFIX}_SITELIB)
+ unset (${_PYTHON_PREFIX}_SITEARCH)
+ endif()
+
+ _python_get_config_var (${_PYTHON_PREFIX}_SOABI SOABI)
+
+ # store properties in the cache to speed up future searches
+ set (_${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES
+ "${${_PYTHON_PREFIX}_INTERPRETER_ID};${${_PYTHON_PREFIX}_VERSION_MAJOR};${${_PYTHON_PREFIX}_VERSION_MINOR};${${_PYTHON_PREFIX}_VERSION_PATCH};${_${_PYTHON_PREFIX}_ARCH};${_${_PYTHON_PREFIX}_ABIFLAGS};${${_PYTHON_PREFIX}_SOABI};${${_PYTHON_PREFIX}_STDLIB};${${_PYTHON_PREFIX}_STDARCH};${${_PYTHON_PREFIX}_SITELIB};${${_PYTHON_PREFIX}_SITEARCH}" CACHE INTERNAL "${_PYTHON_PREFIX} Properties")
+ else()
+ unset (_${_PYTHON_PREFIX}_INTERPRETER_SIGNATURE CACHE)
+ unset (${_PYTHON_PREFIX}_INTERPRETER_ID)
+ endif()
+ endif()
+ endif()
+
+ if (${_PYTHON_PREFIX}_ARTIFACTS_INTERACTIVE)
+ set (${_PYTHON_PREFIX}_EXECUTABLE "${_${_PYTHON_PREFIX}_EXECUTABLE}" CACHE FILEPATH "${_PYTHON_PREFIX} Interpreter")
+ endif()
+
+ _python_mark_as_internal (_${_PYTHON_PREFIX}_EXECUTABLE
+ _${_PYTHON_PREFIX}_INTERPRETER_PROPERTIES
+ _${_PYTHON_PREFIX}_INTERPRETER_SIGNATURE)
+endif()
+
+
+# second step, search for compiler (IronPython)
+if ("Compiler" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS)
+ list (APPEND _${_PYTHON_PREFIX}_CACHED_VARS _${_PYTHON_PREFIX}_COMPILER)
+ if (${_PYTHON_PREFIX}_FIND_REQUIRED_Compiler)
+ list (APPEND _${_PYTHON_PREFIX}_REQUIRED_VARS ${_PYTHON_PREFIX}_COMPILER)
+ endif()
+
+ if (NOT "IronPython" IN_LIST _${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS)
+ unset (_${_PYTHON_PREFIX}_COMPILER CACHE)
+ unset (_${_PYTHON_PREFIX}_COMPILER_SIGNATURE CACHE)
+ elseif (DEFINED ${_PYTHON_PREFIX}_COMPILER
+ AND IS_ABSOLUTE "${${_PYTHON_PREFIX}_COMPILER}")
+ set (_${_PYTHON_PREFIX}_COMPILER "${${_PYTHON_PREFIX}_COMPILER}" CACHE INTERNAL "")
+ elseif (DEFINED _${_PYTHON_PREFIX}_COMPILER)
+ # compute compiler signature and check validity of definition
+ string (MD5 __${_PYTHON_PREFIX}_COMPILER_SIGNATURE "${_${_PYTHON_PREFIX}_SIGNATURE}:${_${_PYTHON_PREFIX}_COMPILER}")
+ if (__${_PYTHON_PREFIX}_COMPILER_SIGNATURE STREQUAL _${_PYTHON_PREFIX}_COMPILER_SIGNATURE)
+ # check version validity
+ if (${_PYTHON_PREFIX}_FIND_VERSION_EXACT)
+ _python_validate_compiler (VERSION ${${_PYTHON_PREFIX}_FIND_VERSION} EXACT CHECK_EXISTS)
+ elseif (${_PYTHON_PREFIX}_FIND_VERSION_RANGE)
+ _python_validate_compiler (IN_RANGE CHECK_EXISTS)
+ elseif (DEFINED ${_PYTHON_PREFIX}_FIND_VERSION)
+ _python_validate_compiler (VERSION ${${_PYTHON_PREFIX}_FIND_VERSION} CHECK_EXISTS)
+ else()
+ _python_validate_compiler (CHECK_EXISTS)
+ endif()
+ else()
+ unset (_${_PYTHON_PREFIX}_COMPILER CACHE)
+ unset (_${_PYTHON_PREFIX}_COMPILER_SIGNATURE CACHE)
+ endif()
+ endif()
+
+ if ("IronPython" IN_LIST _${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS
+ AND NOT _${_PYTHON_PREFIX}_COMPILER)
+ # IronPython specific artifacts
+ # If IronPython interpreter is found, use its path
+ unset (_${_PYTHON_PREFIX}_IRON_ROOT)
+ if (${_PYTHON_PREFIX}_Interpreter_FOUND AND ${_PYTHON_PREFIX}_INTERPRETER_ID STREQUAL "IronPython")
+ get_filename_component (_${_PYTHON_PREFIX}_IRON_ROOT "${${_PYTHON_PREFIX}_EXECUTABLE}" DIRECTORY)
+ endif()
+
+ if (_${_PYTHON_PREFIX}_FIND_STRATEGY STREQUAL "LOCATION")
+ _python_get_names (_${_PYTHON_PREFIX}_COMPILER_NAMES
+ IMPLEMENTATIONS IronPython
+ VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS}
+ COMPILER)
+
+ _python_get_path_suffixes (_${_PYTHON_PREFIX}_PATH_SUFFIXES
+ IMPLEMENTATIONS IronPython
+ VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS}
+ COMPILER)
+
+ _python_get_frameworks (_${_PYTHON_PREFIX}_FRAMEWORK_PATHS
+ IMPLEMENTATIONS IronPython
+ VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS})
+ _python_get_registries (_${_PYTHON_PREFIX}_REGISTRY_PATHS
+ IMPLEMENTATIONS IronPython
+ VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS})
+
+ set (_${_PYTHON_PREFIX}_VALIDATE_OPTIONS ${_${_PYTHON_PREFIX}_FIND_VERSION_EXACT})
+ if (${_PYTHON_PREFIX}_FIND_VERSION_RANGE)
+ list (APPEND _${_PYTHON_PREFIX}_VALIDATE_OPTIONS IN_RANGE)
+ elseif (DEFINED ${_PYTHON_PREFIX}_FIND_VERSION)
+ list (APPEND _${_PYTHON_PREFIX}_VALIDATE_OPTIONS VERSION ${${_PYTHON_PREFIX}_FIND_VERSION})
+ endif()
+
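+ # same single-pass pattern as for the interpreter lookup above: break out as
+ # soon as a compiler candidate is found and validated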
+ while (TRUE)
+ # Apple frameworks handling
+ if (CMAKE_HOST_APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "FIRST")
+ find_program (_${_PYTHON_PREFIX}_COMPILER
+ NAMES ${_${_PYTHON_PREFIX}_COMPILER_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_IRON_ROOT} ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_CMAKE_PATH
+ NO_CMAKE_ENVIRONMENT_PATH
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ _python_validate_compiler (${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_COMPILER)
+ break()
+ endif()
+ endif()
+ # Windows registry
+ if (CMAKE_HOST_WIN32 AND _${_PYTHON_PREFIX}_FIND_REGISTRY STREQUAL "FIRST")
+ find_program (_${_PYTHON_PREFIX}_COMPILER
+ NAMES ${_${_PYTHON_PREFIX}_COMPILER_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_IRON_ROOT} ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_REGISTRY_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ _python_validate_compiler (${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_COMPILER)
+ break()
+ endif()
+ endif()
+
+ # try using HINTS
+ find_program (_${_PYTHON_PREFIX}_COMPILER
+ NAMES ${_${_PYTHON_PREFIX}_COMPILER_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_IRON_ROOT} ${_${_PYTHON_PREFIX}_HINTS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ _python_validate_compiler (${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_COMPILER)
+ break()
+ endif()
+
+ # try using standard paths
+ find_program (_${_PYTHON_PREFIX}_COMPILER
+ NAMES ${_${_PYTHON_PREFIX}_COMPILER_NAMES}
+ NAMES_PER_DIR
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES})
+ _python_validate_compiler (${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_COMPILER)
+ break()
+ endif()
+
+ # Apple frameworks handling
+ if (CMAKE_HOST_APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "LAST")
+ find_program (_${_PYTHON_PREFIX}_COMPILER
+ NAMES ${_${_PYTHON_PREFIX}_COMPILER_NAMES}
+ NAMES_PER_DIR
+ PATHS ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_DEFAULT_PATH)
+ _python_validate_compiler (${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_COMPILER)
+ break()
+ endif()
+ endif()
+ # Windows registry
+ if (CMAKE_HOST_WIN32 AND _${_PYTHON_PREFIX}_FIND_REGISTRY STREQUAL "LAST")
+ find_program (_${_PYTHON_PREFIX}_COMPILER
+ NAMES ${_${_PYTHON_PREFIX}_COMPILER_NAMES}
+ NAMES_PER_DIR
+ PATHS ${_${_PYTHON_PREFIX}_REGISTRY_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_DEFAULT_PATH)
+ _python_validate_compiler (${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_COMPILER)
+ break()
+ endif()
+ endif()
+
+ break()
+ endwhile()
+ else()
+ # try using root dir and registry
+ set (_${_PYTHON_PREFIX}_VALIDATE_OPTIONS EXACT)
+ if (${_PYTHON_PREFIX}_FIND_VERSION_RANGE)
+ list (APPEND _${_PYTHON_PREFIX}_VALIDATE_OPTIONS IN_RANGE)
+ endif()
+
+ foreach (_${_PYTHON_PREFIX}_VERSION IN LISTS _${_PYTHON_PREFIX}_FIND_VERSIONS)
+ _python_get_names (_${_PYTHON_PREFIX}_COMPILER_NAMES
+ IMPLEMENTATIONS IronPython
+ VERSION ${_${_PYTHON_PREFIX}_VERSION}
+ COMPILER)
+
+ _python_get_path_suffixes (_${_PYTHON_PREFIX}_PATH_SUFFIXES
+ IMPLEMENTATIONS IronPython
+ VERSION ${_${_PYTHON_PREFIX}_VERSION}
+ COMPILER)
+
+ _python_get_frameworks (_${_PYTHON_PREFIX}_FRAMEWORK_PATHS
+ IMPLEMENTATIONS IronPython
+ VERSION ${_${_PYTHON_PREFIX}_VERSION})
+ _python_get_registries (_${_PYTHON_PREFIX}_REGISTRY_PATHS
+ IMPLEMENTATIONS IronPython
+ VERSION ${_${_PYTHON_PREFIX}_VERSION})
+
+ # Apple frameworks handling
+ if (CMAKE_HOST_APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "FIRST")
+ find_program (_${_PYTHON_PREFIX}_COMPILER
+ NAMES ${_${_PYTHON_PREFIX}_COMPILER_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_IRON_ROOT} ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_CMAKE_PATH
+ NO_CMAKE_ENVIRONMENT_PATH
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ _python_validate_compiler (VERSION ${_${_PYTHON_PREFIX}_VERSION} ${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_COMPILER)
+ break()
+ endif()
+ endif()
+ # Windows registry
+ if (CMAKE_HOST_WIN32 AND _${_PYTHON_PREFIX}_FIND_REGISTRY STREQUAL "FIRST")
+ find_program (_${_PYTHON_PREFIX}_COMPILER
+ NAMES ${_${_PYTHON_PREFIX}_COMPILER_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_IRON_ROOT} ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_REGISTRY_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ _python_validate_compiler (VERSION ${_${_PYTHON_PREFIX}_VERSION} ${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_COMPILER)
+ break()
+ endif()
+ endif()
+
+ # try using HINTS
+ find_program (_${_PYTHON_PREFIX}_COMPILER
+ NAMES ${_${_PYTHON_PREFIX}_COMPILER_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_IRON_ROOT} ${_${_PYTHON_PREFIX}_HINTS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ _python_validate_compiler (VERSION ${_${_PYTHON_PREFIX}_VERSION} ${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_COMPILER)
+ break()
+ endif()
+
+ # Apple frameworks handling
+ if (CMAKE_HOST_APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "LAST")
+ find_program (_${_PYTHON_PREFIX}_COMPILER
+ NAMES ${_${_PYTHON_PREFIX}_COMPILER_NAMES}
+ NAMES_PER_DIR
+ PATHS ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_DEFAULT_PATH)
+ _python_validate_compiler (VERSION ${_${_PYTHON_PREFIX}_VERSION} ${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_COMPILER)
+ break()
+ endif()
+ endif()
+ # Windows registry
+ if (CMAKE_HOST_WIN32 AND _${_PYTHON_PREFIX}_FIND_REGISTRY STREQUAL "LAST")
+ find_program (_${_PYTHON_PREFIX}_COMPILER
+ NAMES ${_${_PYTHON_PREFIX}_COMPILER_NAMES}
+ NAMES_PER_DIR
+ PATHS ${_${_PYTHON_PREFIX}_REGISTRY_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_DEFAULT_PATH)
+ _python_validate_compiler (VERSION ${_${_PYTHON_PREFIX}_VERSION} ${_${_PYTHON_PREFIX}_VALIDATE_OPTIONS})
+ if (_${_PYTHON_PREFIX}_COMPILER)
+ break()
+ endif()
+ endif()
+ endforeach()
+
+ # no specific version found, re-try in standard paths
+ _python_get_names (_${_PYTHON_PREFIX}_COMPILER_NAMES
+ IMPLEMENTATIONS IronPython
+ VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS}
+ COMPILER)
+ _python_get_path_suffixes (_${_PYTHON_PREFIX}_PATH_SUFFIXES
+ IMPLEMENTATIONS IronPython
+ VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS}
+ COMPILER)
+ find_program (_${_PYTHON_PREFIX}_COMPILER
+ NAMES ${_${_PYTHON_PREFIX}_COMPILER_NAMES}
+ HINTS ${_${_PYTHON_PREFIX}_IRON_ROOT} ${_${_PYTHON_PREFIX}_HINTS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES})
+ _python_validate_compiler ()
+ endif()
+ endif()
+
+ set (${_PYTHON_PREFIX}_COMPILER "${_${_PYTHON_PREFIX}_COMPILER}")
+
+ if (_${_PYTHON_PREFIX}_COMPILER)
+ # retrieve python environment version from compiler
+ _python_get_launcher (_${_PYTHON_PREFIX}_COMPILER_LAUNCHER COMPILER)
+ set (_${_PYTHON_PREFIX}_VERSION_DIR "${CMAKE_CURRENT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/PythonCompilerVersion.dir")
+ file (WRITE "${_${_PYTHON_PREFIX}_VERSION_DIR}/version.py" "import sys; sys.stdout.write('.'.join([str(x) for x in sys.version_info[:3]]))\n")
+ execute_process (COMMAND ${_${_PYTHON_PREFIX}_COMPILER_LAUNCHER} "${_${_PYTHON_PREFIX}_COMPILER}"
+ ${_${_PYTHON_PREFIX}_IRON_PYTHON_COMPILER_ARCH_FLAGS}
+ /target:exe /embed "${_${_PYTHON_PREFIX}_VERSION_DIR}/version.py"
+ WORKING_DIRECTORY "${_${_PYTHON_PREFIX}_VERSION_DIR}"
+ OUTPUT_QUIET
+ ERROR_QUIET)
+ get_filename_component (_${_PYTHON_PREFIX}_IR_DIR "${_${_PYTHON_PREFIX}_COMPILER}" DIRECTORY)
+ execute_process (COMMAND "${CMAKE_COMMAND}" -E env "MONO_PATH=${_${_PYTHON_PREFIX}_IR_DIR}"
+ ${${_PYTHON_PREFIX}_DOTNET_LAUNCHER} "${_${_PYTHON_PREFIX}_VERSION_DIR}/version.exe"
+ WORKING_DIRECTORY "${_${_PYTHON_PREFIX}_VERSION_DIR}"
+ RESULT_VARIABLE _${_PYTHON_PREFIX}_RESULT
+ OUTPUT_VARIABLE _${_PYTHON_PREFIX}_VERSION
+ ERROR_QUIET)
+ if (NOT _${_PYTHON_PREFIX}_RESULT)
+ set (_${_PYTHON_PREFIX}_COMPILER_USABLE TRUE)
+ string (REGEX MATCHALL "[0-9]+" _${_PYTHON_PREFIX}_VERSIONS "${_${_PYTHON_PREFIX}_VERSION}")
+ list (GET _${_PYTHON_PREFIX}_VERSIONS 0 _${_PYTHON_PREFIX}_VERSION_MAJOR)
+ list (GET _${_PYTHON_PREFIX}_VERSIONS 1 _${_PYTHON_PREFIX}_VERSION_MINOR)
+ list (GET _${_PYTHON_PREFIX}_VERSIONS 2 _${_PYTHON_PREFIX}_VERSION_PATCH)
+
+ if (NOT ${_PYTHON_PREFIX}_Interpreter_FOUND)
+ # set public version information
+ set (${_PYTHON_PREFIX}_VERSION ${_${_PYTHON_PREFIX}_VERSION})
+ set (${_PYTHON_PREFIX}_VERSION_MAJOR ${_${_PYTHON_PREFIX}_VERSION_MAJOR})
+ set (${_PYTHON_PREFIX}_VERSION_MINOR ${_${_PYTHON_PREFIX}_VERSION_MINOR})
+ set (${_PYTHON_PREFIX}_VERSION_PATCH ${_${_PYTHON_PREFIX}_VERSION_PATCH})
+ endif()
+ else()
+ # compiler not usable
+ set (_${_PYTHON_PREFIX}_COMPILER_USABLE FALSE)
+ set (_${_PYTHON_PREFIX}_Compiler_REASON_FAILURE "Cannot run the compiler \"${_${_PYTHON_PREFIX}_COMPILER}\"")
+ endif()
+ file (REMOVE_RECURSE "${_${_PYTHON_PREFIX}_VERSION_DIR}")
+ endif()
+
+ if (_${_PYTHON_PREFIX}_COMPILER AND _${_PYTHON_PREFIX}_COMPILER_USABLE)
+ if (${_PYTHON_PREFIX}_Interpreter_FOUND)
+ # Compiler must be compatible with interpreter
+ if ("${_${_PYTHON_PREFIX}_VERSION_MAJOR}.${_${_PYTHON_PREFIX}_VERSION_MINOR}" VERSION_EQUAL "${${_PYTHON_PREFIX}_VERSION_MAJOR}.${${_PYTHON_PREFIX}_VERSION_MINOR}")
+ set (${_PYTHON_PREFIX}_Compiler_FOUND TRUE)
+ endif()
+ elseif (${_PYTHON_PREFIX}_VERSION_MAJOR VERSION_EQUAL _${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR)
+ set (${_PYTHON_PREFIX}_Compiler_FOUND TRUE)
+ # Use compiler version for future searches to ensure consistency
+ set (_${_PYTHON_PREFIX}_FIND_VERSIONS ${${_PYTHON_PREFIX}_VERSION_MAJOR}.${${_PYTHON_PREFIX}_VERSION_MINOR})
+ endif()
+ endif()
+
+ if (${_PYTHON_PREFIX}_Compiler_FOUND)
+ unset (_${_PYTHON_PREFIX}_Compiler_REASON_FAILURE)
+
+ # compute and save compiler signature
+ string (MD5 __${_PYTHON_PREFIX}_COMPILER_SIGNATURE "${_${_PYTHON_PREFIX}_SIGNATURE}:${_${_PYTHON_PREFIX}_COMPILER}")
+ set (_${_PYTHON_PREFIX}_COMPILER_SIGNATURE "${__${_PYTHON_PREFIX}_COMPILER_SIGNATURE}" CACHE INTERNAL "")
+
+ set (${_PYTHON_PREFIX}_COMPILER_ID IronPython)
+ else()
+ unset (_${_PYTHON_PREFIX}_COMPILER_SIGNATURE CACHE)
+ unset (${_PYTHON_PREFIX}_COMPILER_ID)
+ endif()
+
+ if (${_PYTHON_PREFIX}_ARTIFACTS_INTERACTIVE)
+ set (${_PYTHON_PREFIX}_COMPILER "${_${_PYTHON_PREFIX}_COMPILER}" CACHE FILEPATH "${_PYTHON_PREFIX} Compiler")
+ endif()
+
+ _python_mark_as_internal (_${_PYTHON_PREFIX}_COMPILER
+ _${_PYTHON_PREFIX}_COMPILER_SIGNATURE)
+endif()
+
+# third step, search for the development artifacts
+if (${_PYTHON_PREFIX}_FIND_REQUIRED_Development.Module)
+ if ("LIBRARY" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_MODULE_ARTIFACTS)
+ list (APPEND _${_PYTHON_PREFIX}_REQUIRED_VARS ${_PYTHON_PREFIX}_LIBRARIES)
+ endif()
+ if ("INCLUDE_DIR" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_MODULE_ARTIFACTS)
+ list (APPEND _${_PYTHON_PREFIX}_REQUIRED_VARS ${_PYTHON_PREFIX}_INCLUDE_DIRS)
+ endif()
+endif()
+if (${_PYTHON_PREFIX}_FIND_REQUIRED_Development.Embed)
+ if ("LIBRARY" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_EMBED_ARTIFACTS)
+ list (APPEND _${_PYTHON_PREFIX}_REQUIRED_VARS ${_PYTHON_PREFIX}_LIBRARIES)
+ endif()
+ if ("INCLUDE_DIR" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_EMBED_ARTIFACTS)
+ list (APPEND _${_PYTHON_PREFIX}_REQUIRED_VARS ${_PYTHON_PREFIX}_INCLUDE_DIRS)
+ endif()
+endif()
+if (_${_PYTHON_PREFIX}_REQUIRED_VARS) # Behavior change in CMake 3.14
+ list (REMOVE_DUPLICATES _${_PYTHON_PREFIX}_REQUIRED_VARS)
+endif ()
+## Development environment is not compatible with IronPython interpreter
+if (("Development.Module" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS
+ OR "Development.Embed" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS)
+ AND ((${_PYTHON_PREFIX}_Interpreter_FOUND
+ AND NOT ${_PYTHON_PREFIX}_INTERPRETER_ID STREQUAL "IronPython")
+ OR NOT ${_PYTHON_PREFIX}_Interpreter_FOUND))
+ if (${_PYTHON_PREFIX}_Interpreter_FOUND)
+ # reduce possible implementations to the interpreter one
+ if (${_PYTHON_PREFIX}_INTERPRETER_ID STREQUAL "PyPy")
+ set (_${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS "PyPy")
+ else()
+ set (_${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS "CPython")
+ endif()
+ else()
+ list (REMOVE_ITEM _${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS "IronPython")
+ endif()
+ if ("LIBRARY" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_ARTIFACTS)
+ list (APPEND _${_PYTHON_PREFIX}_CACHED_VARS _${_PYTHON_PREFIX}_LIBRARY_RELEASE
+ _${_PYTHON_PREFIX}_RUNTIME_LIBRARY_RELEASE
+ _${_PYTHON_PREFIX}_LIBRARY_DEBUG
+ _${_PYTHON_PREFIX}_RUNTIME_LIBRARY_DEBUG)
+ endif()
+ if ("INCLUDE_DIR" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_ARTIFACTS)
+ list (APPEND _${_PYTHON_PREFIX}_CACHED_VARS _${_PYTHON_PREFIX}_INCLUDE_DIR)
+ endif()
+
+ _python_check_development_signature (Module)
+ _python_check_development_signature (Embed)
+
+ if (DEFINED ${_PYTHON_PREFIX}_LIBRARY
+ AND IS_ABSOLUTE "${${_PYTHON_PREFIX}_LIBRARY}")
+ set (_${_PYTHON_PREFIX}_LIBRARY_RELEASE "${${_PYTHON_PREFIX}_LIBRARY}" CACHE INTERNAL "")
+ unset (_${_PYTHON_PREFIX}_LIBRARY_DEBUG CACHE)
+ unset (_${_PYTHON_PREFIX}_INCLUDE_DIR CACHE)
+ endif()
+ if (DEFINED ${_PYTHON_PREFIX}_INCLUDE_DIR
+ AND IS_ABSOLUTE "${${_PYTHON_PREFIX}_INCLUDE_DIR}")
+ set (_${_PYTHON_PREFIX}_INCLUDE_DIR "${${_PYTHON_PREFIX}_INCLUDE_DIR}" CACHE INTERNAL "")
+ endif()
+
+ # Support preference of static libs by adjusting CMAKE_FIND_LIBRARY_SUFFIXES
+ unset (_${_PYTHON_PREFIX}_CMAKE_FIND_LIBRARY_SUFFIXES)
+ if (DEFINED ${_PYTHON_PREFIX}_USE_STATIC_LIBS AND NOT WIN32)
+ set(_${_PYTHON_PREFIX}_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
+ if(${_PYTHON_PREFIX}_USE_STATIC_LIBS)
+ set (CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_STATIC_LIBRARY_SUFFIX})
+ else()
+ list (REMOVE_ITEM CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_STATIC_LIBRARY_SUFFIX})
+ endif()
+ endif()
+
+ if (NOT _${_PYTHON_PREFIX}_LIBRARY_RELEASE OR NOT _${_PYTHON_PREFIX}_INCLUDE_DIR)
+ # if a python interpreter is found, use it to look up artifacts
+ # to ensure consistency between interpreter and development environments.
+ # If not, try to locate a compatible config tool.
+ if ((NOT ${_PYTHON_PREFIX}_Interpreter_FOUND OR CMAKE_CROSSCOMPILING)
+ AND "CPython" IN_LIST _${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS)
+ set (_${_PYTHON_PREFIX}_HINTS "${${_PYTHON_PREFIX}_ROOT_DIR}" ENV ${_PYTHON_PREFIX}_ROOT_DIR)
+ unset (_${_PYTHON_PREFIX}_VIRTUALENV_PATHS)
+ if (_${_PYTHON_PREFIX}_FIND_VIRTUALENV MATCHES "^(FIRST|ONLY)$")
+ set (_${_PYTHON_PREFIX}_VIRTUALENV_PATHS ENV VIRTUAL_ENV ENV CONDA_PREFIX)
+ endif()
+
+ if (_${_PYTHON_PREFIX}_FIND_STRATEGY STREQUAL "LOCATION")
+ _python_get_names (_${_PYTHON_PREFIX}_CONFIG_NAMES VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS} POSIX CONFIG)
+ # Framework Paths
+ _python_get_frameworks (_${_PYTHON_PREFIX}_FRAMEWORK_PATHS VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS})
+
+ # Apple frameworks handling
+ if (CMAKE_HOST_APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "FIRST")
+ find_program (_${_PYTHON_PREFIX}_CONFIG
+ NAMES ${_${_PYTHON_PREFIX}_CONFIG_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_VIRTUALENV_PATHS}
+ ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ PATH_SUFFIXES bin
+ NO_CMAKE_PATH
+ NO_CMAKE_ENVIRONMENT_PATH
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ endif()
+
+ find_program (_${_PYTHON_PREFIX}_CONFIG
+ NAMES ${_${_PYTHON_PREFIX}_CONFIG_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_VIRTUALENV_PATHS}
+ PATH_SUFFIXES bin)
+
+ # Apple frameworks handling
+ if (CMAKE_HOST_APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "LAST")
+ find_program (_${_PYTHON_PREFIX}_CONFIG
+ NAMES ${_${_PYTHON_PREFIX}_CONFIG_NAMES}
+ NAMES_PER_DIR
+ PATHS ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ PATH_SUFFIXES bin
+ NO_DEFAULT_PATH)
+ endif()
+
+ if (_${_PYTHON_PREFIX}_CONFIG)
+ execute_process (COMMAND "${_${_PYTHON_PREFIX}_CONFIG}" --help
+ RESULT_VARIABLE _${_PYTHON_PREFIX}_RESULT
+ OUTPUT_VARIABLE __${_PYTHON_PREFIX}_HELP
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (_${_PYTHON_PREFIX}_RESULT)
+ # assume config tool is not usable
+ unset (_${_PYTHON_PREFIX}_CONFIG CACHE)
+ endif()
+ endif()
+
+ if (_${_PYTHON_PREFIX}_CONFIG)
+ execute_process (COMMAND "${_${_PYTHON_PREFIX}_CONFIG}" --abiflags
+ RESULT_VARIABLE _${_PYTHON_PREFIX}_RESULT
+ OUTPUT_VARIABLE __${_PYTHON_PREFIX}_ABIFLAGS
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (_${_PYTHON_PREFIX}_RESULT)
+ # assume ABI is not supported
+ set (__${_PYTHON_PREFIX}_ABIFLAGS "")
+ endif()
+ if (DEFINED _${_PYTHON_PREFIX}_FIND_ABI AND NOT __${_PYTHON_PREFIX}_ABIFLAGS IN_LIST _${_PYTHON_PREFIX}_ABIFLAGS)
+ # Wrong ABI
+ unset (_${_PYTHON_PREFIX}_CONFIG CACHE)
+ endif()
+ endif()
+
+ if (_${_PYTHON_PREFIX}_CONFIG AND DEFINED CMAKE_LIBRARY_ARCHITECTURE)
+ # check that the config tool matches the library architecture
+ execute_process (COMMAND "${_${_PYTHON_PREFIX}_CONFIG}" --configdir
+ RESULT_VARIABLE _${_PYTHON_PREFIX}_RESULT
+ OUTPUT_VARIABLE _${_PYTHON_PREFIX}_CONFIGDIR
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (_${_PYTHON_PREFIX}_RESULT)
+ unset (_${_PYTHON_PREFIX}_CONFIG CACHE)
+ else()
+ string(FIND "${_${_PYTHON_PREFIX}_CONFIGDIR}" "${CMAKE_LIBRARY_ARCHITECTURE}" _${_PYTHON_PREFIX}_RESULT)
+ if (_${_PYTHON_PREFIX}_RESULT EQUAL -1)
+ unset (_${_PYTHON_PREFIX}_CONFIG CACHE)
+ endif()
+ endif()
+ endif()
+ else()
+ foreach (_${_PYTHON_PREFIX}_VERSION IN LISTS _${_PYTHON_PREFIX}_FIND_VERSIONS)
+ # try to use pythonX.Y-config tool
+ _python_get_names (_${_PYTHON_PREFIX}_CONFIG_NAMES VERSION ${_${_PYTHON_PREFIX}_VERSION} POSIX CONFIG)
+
+ # Framework Paths
+ _python_get_frameworks (_${_PYTHON_PREFIX}_FRAMEWORK_PATHS VERSION ${_${_PYTHON_PREFIX}_VERSION})
+
+ # Apple frameworks handling
+ if (CMAKE_HOST_APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "FIRST")
+ find_program (_${_PYTHON_PREFIX}_CONFIG
+ NAMES ${_${_PYTHON_PREFIX}_CONFIG_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_VIRTUALENV_PATHS}
+ ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ PATH_SUFFIXES bin
+ NO_CMAKE_PATH
+ NO_CMAKE_ENVIRONMENT_PATH
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ endif()
+
+ find_program (_${_PYTHON_PREFIX}_CONFIG
+ NAMES ${_${_PYTHON_PREFIX}_CONFIG_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_VIRTUALENV_PATHS}
+ PATH_SUFFIXES bin)
+
+ # Apple frameworks handling
+ if (CMAKE_HOST_APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "LAST")
+ find_program (_${_PYTHON_PREFIX}_CONFIG
+ NAMES ${_${_PYTHON_PREFIX}_CONFIG_NAMES}
+ NAMES_PER_DIR
+ PATHS ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ PATH_SUFFIXES bin
+ NO_DEFAULT_PATH)
+ endif()
+
+ unset (_${_PYTHON_PREFIX}_CONFIG_NAMES)
+
+ if (_${_PYTHON_PREFIX}_CONFIG)
+ execute_process (COMMAND "${_${_PYTHON_PREFIX}_CONFIG}" --help
+ RESULT_VARIABLE _${_PYTHON_PREFIX}_RESULT
+ OUTPUT_VARIABLE __${_PYTHON_PREFIX}_HELP
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (_${_PYTHON_PREFIX}_RESULT)
+ # assume config tool is not usable
+ unset (_${_PYTHON_PREFIX}_CONFIG CACHE)
+ endif()
+ endif()
+
+ if (NOT _${_PYTHON_PREFIX}_CONFIG)
+ continue()
+ endif()
+
+ execute_process (COMMAND "${_${_PYTHON_PREFIX}_CONFIG}" --abiflags
+ RESULT_VARIABLE _${_PYTHON_PREFIX}_RESULT
+ OUTPUT_VARIABLE __${_PYTHON_PREFIX}_ABIFLAGS
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (_${_PYTHON_PREFIX}_RESULT)
+ # assume ABI is not supported
+ set (__${_PYTHON_PREFIX}_ABIFLAGS "")
+ endif()
+ if (DEFINED _${_PYTHON_PREFIX}_FIND_ABI AND NOT __${_PYTHON_PREFIX}_ABIFLAGS IN_LIST _${_PYTHON_PREFIX}_ABIFLAGS)
+ # Wrong ABI
+ unset (_${_PYTHON_PREFIX}_CONFIG CACHE)
+ continue()
+ endif()
+
+ if (_${_PYTHON_PREFIX}_CONFIG AND DEFINED CMAKE_LIBRARY_ARCHITECTURE)
+ # check that the config tool matches the library architecture
+ execute_process (COMMAND "${_${_PYTHON_PREFIX}_CONFIG}" --configdir
+ RESULT_VARIABLE _${_PYTHON_PREFIX}_RESULT
+ OUTPUT_VARIABLE _${_PYTHON_PREFIX}_CONFIGDIR
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (_${_PYTHON_PREFIX}_RESULT)
+ unset (_${_PYTHON_PREFIX}_CONFIG CACHE)
+ continue()
+ endif()
+ string (FIND "${_${_PYTHON_PREFIX}_CONFIGDIR}" "${CMAKE_LIBRARY_ARCHITECTURE}" _${_PYTHON_PREFIX}_RESULT)
+ if (_${_PYTHON_PREFIX}_RESULT EQUAL -1)
+ unset (_${_PYTHON_PREFIX}_CONFIG CACHE)
+ continue()
+ endif()
+ endif()
+
+ if (_${_PYTHON_PREFIX}_CONFIG)
+ break()
+ endif()
+ endforeach()
+ endif()
+ endif()
+ endif()
+
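+ # library lookup: prefer locations reported by the interpreter or config tool,
+ # then fall back to HINTS, framework and registry paths, and finally the
+ # default paths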
+ if ("LIBRARY" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_ARTIFACTS)
+ if (NOT _${_PYTHON_PREFIX}_LIBRARY_RELEASE)
+ if ((${_PYTHON_PREFIX}_Interpreter_FOUND AND NOT CMAKE_CROSSCOMPILING) OR _${_PYTHON_PREFIX}_CONFIG)
+ # retrieve root install directory
+ _python_get_config_var (_${_PYTHON_PREFIX}_PREFIX PREFIX)
+
+ # enforce current ABI
+ _python_get_config_var (_${_PYTHON_PREFIX}_ABIFLAGS ABIFLAGS)
+
+ set (_${_PYTHON_PREFIX}_HINTS "${_${_PYTHON_PREFIX}_PREFIX}")
+
+ # retrieve library
+ ## compute some paths and artifact names
+ if (_${_PYTHON_PREFIX}_CONFIG)
+ string (REGEX REPLACE "^.+python([0-9.]+)[a-z]*-config" "\\1" _${_PYTHON_PREFIX}_VERSION "${_${_PYTHON_PREFIX}_CONFIG}")
+ else()
+ set (_${_PYTHON_PREFIX}_VERSION "${${_PYTHON_PREFIX}_VERSION_MAJOR}.${${_PYTHON_PREFIX}_VERSION_MINOR}")
+ endif()
+ _python_get_path_suffixes (_${_PYTHON_PREFIX}_PATH_SUFFIXES VERSION ${_${_PYTHON_PREFIX}_VERSION} LIBRARY)
+ _python_get_names (_${_PYTHON_PREFIX}_LIB_NAMES VERSION ${_${_PYTHON_PREFIX}_VERSION} WIN32 POSIX LIBRARY)
+
+ _python_get_config_var (_${_PYTHON_PREFIX}_CONFIGDIR CONFIGDIR)
+ list (APPEND _${_PYTHON_PREFIX}_HINTS "${_${_PYTHON_PREFIX}_CONFIGDIR}")
+
+ list (APPEND _${_PYTHON_PREFIX}_HINTS "${${_PYTHON_PREFIX}_ROOT_DIR}" ENV ${_PYTHON_PREFIX}_ROOT_DIR)
+
+ find_library (_${_PYTHON_PREFIX}_LIBRARY_RELEASE
+ NAMES ${_${_PYTHON_PREFIX}_LIB_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ endif()
+
+ # Rely on HINTS and standard paths if interpreter or config tool failed to locate artifacts
+ if (NOT _${_PYTHON_PREFIX}_LIBRARY_RELEASE)
+ set (_${_PYTHON_PREFIX}_HINTS "${${_PYTHON_PREFIX}_ROOT_DIR}" ENV ${_PYTHON_PREFIX}_ROOT_DIR)
+
+ unset (_${_PYTHON_PREFIX}_VIRTUALENV_PATHS)
+ if (_${_PYTHON_PREFIX}_FIND_VIRTUALENV MATCHES "^(FIRST|ONLY)$")
+ set (_${_PYTHON_PREFIX}_VIRTUALENV_PATHS ENV VIRTUAL_ENV ENV CONDA_PREFIX)
+ endif()
+
+ if (_${_PYTHON_PREFIX}_FIND_STRATEGY STREQUAL "LOCATION")
+ # library names
+ _python_get_names (_${_PYTHON_PREFIX}_LIB_NAMES VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS} WIN32 POSIX LIBRARY)
+ _python_get_names (_${_PYTHON_PREFIX}_LIB_NAMES_DEBUG VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS} WIN32 DEBUG)
+ # Path suffixes
+ _python_get_path_suffixes (_${_PYTHON_PREFIX}_PATH_SUFFIXES VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS} LIBRARY)
+
+ # Framework Paths
+ _python_get_frameworks (_${_PYTHON_PREFIX}_FRAMEWORK_PATHS VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS})
+ # Registry Paths
+ _python_get_registries (_${_PYTHON_PREFIX}_REGISTRY_PATHS VERSION ${_${_PYTHON_PREFIX}_FIND_VERSIONS} )
+
+ if (APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "FIRST")
+ find_library (_${_PYTHON_PREFIX}_LIBRARY_RELEASE
+ NAMES ${_${_PYTHON_PREFIX}_LIB_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_VIRTUALENV_PATHS}
+ ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_CMAKE_PATH
+ NO_CMAKE_ENVIRONMENT_PATH
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ endif()
+
+ if (WIN32 AND _${_PYTHON_PREFIX}_FIND_REGISTRY STREQUAL "FIRST")
+ find_library (_${_PYTHON_PREFIX}_LIBRARY_RELEASE
+ NAMES ${_${_PYTHON_PREFIX}_LIB_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_VIRTUALENV_PATHS}
+ ${_${_PYTHON_PREFIX}_REGISTRY_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ endif()
+
+ # search in HINTS locations
+ find_library (_${_PYTHON_PREFIX}_LIBRARY_RELEASE
+ NAMES ${_${_PYTHON_PREFIX}_LIB_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_VIRTUALENV_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+
+ if (APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "LAST")
+ set (__${_PYTHON_PREFIX}_FRAMEWORK_PATHS ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS})
+ else()
+ unset (__${_PYTHON_PREFIX}_FRAMEWORK_PATHS)
+ endif()
+
+ if (WIN32 AND _${_PYTHON_PREFIX}_FIND_REGISTRY STREQUAL "LAST")
+ set (__${_PYTHON_PREFIX}_REGISTRY_PATHS ${_${_PYTHON_PREFIX}_REGISTRY_PATHS})
+ else()
+ unset (__${_PYTHON_PREFIX}_REGISTRY_PATHS)
+ endif()
+
+ # search in all default paths
+ find_library (_${_PYTHON_PREFIX}_LIBRARY_RELEASE
+ NAMES ${_${_PYTHON_PREFIX}_LIB_NAMES}
+ NAMES_PER_DIR
+ PATHS ${__${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ ${__${_PYTHON_PREFIX}_REGISTRY_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES})
+ else()
+ foreach (_${_PYTHON_PREFIX}_LIB_VERSION IN LISTS _${_PYTHON_PREFIX}_FIND_VERSIONS)
+ _python_get_names (_${_PYTHON_PREFIX}_LIB_NAMES VERSION ${_${_PYTHON_PREFIX}_LIB_VERSION} WIN32 POSIX LIBRARY)
+ _python_get_names (_${_PYTHON_PREFIX}_LIB_NAMES_DEBUG VERSION ${_${_PYTHON_PREFIX}_LIB_VERSION} WIN32 DEBUG)
+
+ _python_get_frameworks (_${_PYTHON_PREFIX}_FRAMEWORK_PATHS VERSION ${_${_PYTHON_PREFIX}_LIB_VERSION})
+ _python_get_registries (_${_PYTHON_PREFIX}_REGISTRY_PATHS VERSION ${_${_PYTHON_PREFIX}_LIB_VERSION})
+
+ _python_get_path_suffixes (_${_PYTHON_PREFIX}_PATH_SUFFIXES VERSION ${_${_PYTHON_PREFIX}_LIB_VERSION} LIBRARY)
+
+ if (APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "FIRST")
+ find_library (_${_PYTHON_PREFIX}_LIBRARY_RELEASE
+ NAMES ${_${_PYTHON_PREFIX}_LIB_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_VIRTUALENV_PATHS}
+ ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_CMAKE_PATH
+ NO_CMAKE_ENVIRONMENT_PATH
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ endif()
+
+ if (WIN32 AND _${_PYTHON_PREFIX}_FIND_REGISTRY STREQUAL "FIRST")
+ find_library (_${_PYTHON_PREFIX}_LIBRARY_RELEASE
+ NAMES ${_${_PYTHON_PREFIX}_LIB_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_VIRTUALENV_PATHS}
+ ${_${_PYTHON_PREFIX}_REGISTRY_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ endif()
+
+ # search in HINTS locations
+ find_library (_${_PYTHON_PREFIX}_LIBRARY_RELEASE
+ NAMES ${_${_PYTHON_PREFIX}_LIB_NAMES}
+ NAMES_PER_DIR
+ HINTS ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_VIRTUALENV_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+
+ if (APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "LAST")
+ set (__${_PYTHON_PREFIX}_FRAMEWORK_PATHS ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS})
+ else()
+ unset (__${_PYTHON_PREFIX}_FRAMEWORK_PATHS)
+ endif()
+
+ if (WIN32 AND _${_PYTHON_PREFIX}_FIND_REGISTRY STREQUAL "LAST")
+ set (__${_PYTHON_PREFIX}_REGISTRY_PATHS ${_${_PYTHON_PREFIX}_REGISTRY_PATHS})
+ else()
+ unset (__${_PYTHON_PREFIX}_REGISTRY_PATHS)
+ endif()
+
+ # search in all default paths
+ find_library (_${_PYTHON_PREFIX}_LIBRARY_RELEASE
+ NAMES ${_${_PYTHON_PREFIX}_LIB_NAMES}
+ NAMES_PER_DIR
+ PATHS ${__${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ ${__${_PYTHON_PREFIX}_REGISTRY_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES})
+
+ if (_${_PYTHON_PREFIX}_LIBRARY_RELEASE)
+ break()
+ endif()
+ endforeach()
+ endif()
+ endif()
+ endif()
+
+ # finalize library version information
+ _python_get_version (LIBRARY PREFIX _${_PYTHON_PREFIX}_)
+ if (_${_PYTHON_PREFIX}_VERSION EQUAL "${_${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR}")
+ # not able to extract full version from library name
+ if (${_PYTHON_PREFIX}_Interpreter_FOUND)
+ # update from interpreter
+ set (_${_PYTHON_PREFIX}_VERSION ${${_PYTHON_PREFIX}_VERSION})
+ set (_${_PYTHON_PREFIX}_VERSION_MAJOR ${${_PYTHON_PREFIX}_VERSION_MAJOR})
+ set (_${_PYTHON_PREFIX}_VERSION_MINOR ${${_PYTHON_PREFIX}_VERSION_MINOR})
+ set (_${_PYTHON_PREFIX}_VERSION_PATCH ${${_PYTHON_PREFIX}_VERSION_PATCH})
+ endif()
+ endif()
+
+ set (${_PYTHON_PREFIX}_LIBRARY_RELEASE "${_${_PYTHON_PREFIX}_LIBRARY_RELEASE}")
+
+ if (_${_PYTHON_PREFIX}_LIBRARY_RELEASE AND NOT EXISTS "${_${_PYTHON_PREFIX}_LIBRARY_RELEASE}")
+ set (_${_PYTHON_PREFIX}_Development_REASON_FAILURE "Cannot find the library \"${_${_PYTHON_PREFIX}_LIBRARY_RELEASE}\"")
+ set_property (CACHE _${_PYTHON_PREFIX}_LIBRARY_RELEASE PROPERTY VALUE "${_PYTHON_PREFIX}_LIBRARY_RELEASE-NOTFOUND")
+ endif()
+
+ set (_${_PYTHON_PREFIX}_HINTS "${${_PYTHON_PREFIX}_ROOT_DIR}" ENV ${_PYTHON_PREFIX}_ROOT_DIR)
+
+ if (WIN32 AND _${_PYTHON_PREFIX}_LIBRARY_RELEASE)
+ # search for debug library
+ # use release library location as a hint
+ _python_get_names (_${_PYTHON_PREFIX}_LIB_NAMES_DEBUG VERSION ${_${_PYTHON_PREFIX}_VERSION} WIN32 DEBUG)
+ get_filename_component (_${_PYTHON_PREFIX}_PATH "${_${_PYTHON_PREFIX}_LIBRARY_RELEASE}" DIRECTORY)
+ find_library (_${_PYTHON_PREFIX}_LIBRARY_DEBUG
+ NAMES ${_${_PYTHON_PREFIX}_LIB_NAMES_DEBUG}
+ NAMES_PER_DIR
+ HINTS "${_${_PYTHON_PREFIX}_PATH}" ${_${_PYTHON_PREFIX}_HINTS}
+ NO_DEFAULT_PATH)
+ # second try including CMake variables to catch non-conventional layouts
+ find_library (_${_PYTHON_PREFIX}_LIBRARY_DEBUG
+ NAMES ${_${_PYTHON_PREFIX}_LIB_NAMES_DEBUG}
+ NAMES_PER_DIR
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ endif()
+
+ # retrieve runtime libraries
+ if (_${_PYTHON_PREFIX}_LIBRARY_RELEASE)
+ _python_get_names (_${_PYTHON_PREFIX}_LIB_NAMES VERSION ${_${_PYTHON_PREFIX}_VERSION} WIN32 POSIX LIBRARY)
+ get_filename_component (_${_PYTHON_PREFIX}_PATH "${_${_PYTHON_PREFIX}_LIBRARY_RELEASE}" DIRECTORY)
+ get_filename_component (_${_PYTHON_PREFIX}_PATH2 "${_${_PYTHON_PREFIX}_PATH}" DIRECTORY)
+ _python_find_runtime_library (_${_PYTHON_PREFIX}_RUNTIME_LIBRARY_RELEASE
+ NAMES ${_${_PYTHON_PREFIX}_LIB_NAMES}
+ NAMES_PER_DIR
+ HINTS "${_${_PYTHON_PREFIX}_PATH}"
+ "${_${_PYTHON_PREFIX}_PATH2}" ${_${_PYTHON_PREFIX}_HINTS}
+ PATH_SUFFIXES bin)
+ endif()
+ if (_${_PYTHON_PREFIX}_LIBRARY_DEBUG)
+ _python_get_names (_${_PYTHON_PREFIX}_LIB_NAMES_DEBUG VERSION ${_${_PYTHON_PREFIX}_VERSION} WIN32 DEBUG)
+ get_filename_component (_${_PYTHON_PREFIX}_PATH "${_${_PYTHON_PREFIX}_LIBRARY_DEBUG}" DIRECTORY)
+ get_filename_component (_${_PYTHON_PREFIX}_PATH2 "${_${_PYTHON_PREFIX}_PATH}" DIRECTORY)
+ _python_find_runtime_library (_${_PYTHON_PREFIX}_RUNTIME_LIBRARY_DEBUG
+ NAMES ${_${_PYTHON_PREFIX}_LIB_NAMES_DEBUG}
+ NAMES_PER_DIR
+ HINTS "${_${_PYTHON_PREFIX}_PATH}"
+ "${_${_PYTHON_PREFIX}_PATH2}" ${_${_PYTHON_PREFIX}_HINTS}
+ PATH_SUFFIXES bin)
+ endif()
+ endif()
+
+ if ("INCLUDE_DIR" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_ARTIFACTS)
+ while (NOT _${_PYTHON_PREFIX}_INCLUDE_DIR)
+ if ("LIBRARY" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_ARTIFACTS
+ AND NOT _${_PYTHON_PREFIX}_LIBRARY_RELEASE)
+ # Don't search for the include dir if no library was found
+ break()
+ endif()
+
+ if ((${_PYTHON_PREFIX}_Interpreter_FOUND AND NOT CMAKE_CROSSCOMPILING) OR _${_PYTHON_PREFIX}_CONFIG)
+ _python_get_config_var (_${_PYTHON_PREFIX}_INCLUDE_DIRS INCLUDES)
+
+ find_path (_${_PYTHON_PREFIX}_INCLUDE_DIR
+ NAMES ${_${_PYTHON_PREFIX}_INCLUDE_NAMES}
+ HINTS ${_${_PYTHON_PREFIX}_INCLUDE_DIRS}
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ endif()
+
+ # Rely on HINTS and standard paths if interpreter or config tool failed to locate artifacts
+ if (NOT _${_PYTHON_PREFIX}_INCLUDE_DIR)
+ unset (_${_PYTHON_PREFIX}_VIRTUALENV_PATHS)
+ if (_${_PYTHON_PREFIX}_FIND_VIRTUALENV MATCHES "^(FIRST|ONLY)$")
+ set (_${_PYTHON_PREFIX}_VIRTUALENV_PATHS ENV VIRTUAL_ENV ENV CONDA_PREFIX)
+ endif()
+ unset (_${_PYTHON_PREFIX}_INCLUDE_HINTS)
+
+ if (_${_PYTHON_PREFIX}_LIBRARY_RELEASE)
+ # Use the library's install prefix as a hint
+ if (_${_PYTHON_PREFIX}_LIBRARY_RELEASE MATCHES "^(.+/Frameworks/Python.framework/Versions/[0-9.]+)")
+ list (APPEND _${_PYTHON_PREFIX}_INCLUDE_HINTS "${CMAKE_MATCH_1}")
+ elseif (_${_PYTHON_PREFIX}_LIBRARY_RELEASE MATCHES "^(.+)/lib(64|32)?/python[0-9.]+/config")
+ list (APPEND _${_PYTHON_PREFIX}_INCLUDE_HINTS "${CMAKE_MATCH_1}")
+ elseif (DEFINED CMAKE_LIBRARY_ARCHITECTURE AND ${_${_PYTHON_PREFIX}_LIBRARY_RELEASE} MATCHES "^(.+)/lib/${CMAKE_LIBRARY_ARCHITECTURE}")
+ list (APPEND _${_PYTHON_PREFIX}_INCLUDE_HINTS "${CMAKE_MATCH_1}")
+ else()
+ # assume library is in a directory under root
+ get_filename_component (_${_PYTHON_PREFIX}_PREFIX "${_${_PYTHON_PREFIX}_LIBRARY_RELEASE}" DIRECTORY)
+ get_filename_component (_${_PYTHON_PREFIX}_PREFIX "${_${_PYTHON_PREFIX}_PREFIX}" DIRECTORY)
+ list (APPEND _${_PYTHON_PREFIX}_INCLUDE_HINTS "${_${_PYTHON_PREFIX}_PREFIX}")
+ endif()
+ endif()
+
+ _python_get_frameworks (_${_PYTHON_PREFIX}_FRAMEWORK_PATHS VERSION ${_${_PYTHON_PREFIX}_VERSION})
+ _python_get_registries (_${_PYTHON_PREFIX}_REGISTRY_PATHS VERSION ${_${_PYTHON_PREFIX}_VERSION})
+ _python_get_path_suffixes (_${_PYTHON_PREFIX}_PATH_SUFFIXES VERSION ${_${_PYTHON_PREFIX}_VERSION} INCLUDE)
+
+ if (APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "FIRST")
+ find_path (_${_PYTHON_PREFIX}_INCLUDE_DIR
+ NAMES ${_${_PYTHON_PREFIX}_INCLUDE_NAMES}
+ HINTS ${_${_PYTHON_PREFIX}_INCLUDE_HINTS} ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_VIRTUALENV_PATHS}
+ ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_CMAKE_PATH
+ NO_CMAKE_ENVIRONMENT_PATH
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ endif()
+
+ if (WIN32 AND _${_PYTHON_PREFIX}_FIND_REGISTRY STREQUAL "FIRST")
+ find_path (_${_PYTHON_PREFIX}_INCLUDE_DIR
+ NAMES ${_${_PYTHON_PREFIX}_INCLUDE_NAMES}
+ HINTS ${_${_PYTHON_PREFIX}_INCLUDE_HINTS} ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_VIRTUALENV_PATHS}
+ ${_${_PYTHON_PREFIX}_REGISTRY_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ endif()
+
+ if (APPLE AND _${_PYTHON_PREFIX}_FIND_FRAMEWORK STREQUAL "LAST")
+ set (__${_PYTHON_PREFIX}_FRAMEWORK_PATHS ${_${_PYTHON_PREFIX}_FRAMEWORK_PATHS})
+ else()
+ unset (__${_PYTHON_PREFIX}_FRAMEWORK_PATHS)
+ endif()
+
+ if (WIN32 AND _${_PYTHON_PREFIX}_FIND_REGISTRY STREQUAL "LAST")
+ set (__${_PYTHON_PREFIX}_REGISTRY_PATHS ${_${_PYTHON_PREFIX}_REGISTRY_PATHS})
+ else()
+ unset (__${_PYTHON_PREFIX}_REGISTRY_PATHS)
+ endif()
+
+ find_path (_${_PYTHON_PREFIX}_INCLUDE_DIR
+ NAMES ${_${_PYTHON_PREFIX}_INCLUDE_NAMES}
+ HINTS ${_${_PYTHON_PREFIX}_INCLUDE_HINTS} ${_${_PYTHON_PREFIX}_HINTS}
+ PATHS ${_${_PYTHON_PREFIX}_VIRTUALENV_PATHS}
+ ${__${_PYTHON_PREFIX}_FRAMEWORK_PATHS}
+ ${__${_PYTHON_PREFIX}_REGISTRY_PATHS}
+ PATH_SUFFIXES ${_${_PYTHON_PREFIX}_PATH_SUFFIXES}
+ NO_SYSTEM_ENVIRONMENT_PATH
+ NO_CMAKE_SYSTEM_PATH)
+ endif()
+
+ # search header file in standard locations
+ find_path (_${_PYTHON_PREFIX}_INCLUDE_DIR
+ NAMES ${_${_PYTHON_PREFIX}_INCLUDE_NAMES})
+
+ break()
+ endwhile()
+
+ set (${_PYTHON_PREFIX}_INCLUDE_DIRS "${_${_PYTHON_PREFIX}_INCLUDE_DIR}")
+
+ if (_${_PYTHON_PREFIX}_INCLUDE_DIR AND NOT EXISTS "${_${_PYTHON_PREFIX}_INCLUDE_DIR}")
+ set (_${_PYTHON_PREFIX}_Development_REASON_FAILURE "Cannot find the directory \"${_${_PYTHON_PREFIX}_INCLUDE_DIR}\"")
+ set_property (CACHE _${_PYTHON_PREFIX}_INCLUDE_DIR PROPERTY VALUE "${_PYTHON_PREFIX}_INCLUDE_DIR-NOTFOUND")
+ endif()
+
+ if (_${_PYTHON_PREFIX}_INCLUDE_DIR)
+ # retrieve version from header file
+ _python_get_version (INCLUDE PREFIX _${_PYTHON_PREFIX}_INC_)
+ if (_${_PYTHON_PREFIX}_LIBRARY_RELEASE)
+ if ("${_${_PYTHON_PREFIX}_INC_VERSION_MAJOR}.${_${_PYTHON_PREFIX}_INC_VERSION_MINOR}"
+ VERSION_EQUAL _${_PYTHON_PREFIX}_VERSION)
+ # update versioning
+ set (_${_PYTHON_PREFIX}_VERSION ${_${_PYTHON_PREFIX}_INC_VERSION})
+ set (_${_PYTHON_PREFIX}_VERSION_PATCH ${_${_PYTHON_PREFIX}_INC_VERSION_PATCH})
+ endif()
+ else()
+ set (_${_PYTHON_PREFIX}_VERSION ${_${_PYTHON_PREFIX}_INC_VERSION})
+ set (_${_PYTHON_PREFIX}_VERSION_MAJOR ${_${_PYTHON_PREFIX}_INC_VERSION_MAJOR})
+ set (_${_PYTHON_PREFIX}_VERSION_MINOR ${_${_PYTHON_PREFIX}_INC_VERSION_MINOR})
+ set (_${_PYTHON_PREFIX}_VERSION_PATCH ${_${_PYTHON_PREFIX}_INC_VERSION_PATCH})
+ endif()
+ endif()
+ endif()
+
+ if (NOT ${_PYTHON_PREFIX}_Interpreter_FOUND AND NOT ${_PYTHON_PREFIX}_Compiler_FOUND)
+ # set public version information
+ set (${_PYTHON_PREFIX}_VERSION ${_${_PYTHON_PREFIX}_VERSION})
+ set (${_PYTHON_PREFIX}_VERSION_MAJOR ${_${_PYTHON_PREFIX}_VERSION_MAJOR})
+ set (${_PYTHON_PREFIX}_VERSION_MINOR ${_${_PYTHON_PREFIX}_VERSION_MINOR})
+ set (${_PYTHON_PREFIX}_VERSION_PATCH ${_${_PYTHON_PREFIX}_VERSION_PATCH})
+ endif()
+
+ # define public variables
+ if ("LIBRARY" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_ARTIFACTS)
+ set (${_PYTHON_PREFIX}_LIBRARY_DEBUG "${_${_PYTHON_PREFIX}_LIBRARY_DEBUG}")
+ _python_select_library_configurations (${_PYTHON_PREFIX})
+
+ set (${_PYTHON_PREFIX}_RUNTIME_LIBRARY_RELEASE "${_${_PYTHON_PREFIX}_RUNTIME_LIBRARY_RELEASE}")
+ set (${_PYTHON_PREFIX}_RUNTIME_LIBRARY_DEBUG "${_${_PYTHON_PREFIX}_RUNTIME_LIBRARY_DEBUG}")
+
+ if (_${_PYTHON_PREFIX}_RUNTIME_LIBRARY_RELEASE)
+ set (${_PYTHON_PREFIX}_RUNTIME_LIBRARY "${_${_PYTHON_PREFIX}_RUNTIME_LIBRARY_RELEASE}")
+ elseif (_${_PYTHON_PREFIX}_RUNTIME_LIBRARY_DEBUG)
+ set (${_PYTHON_PREFIX}_RUNTIME_LIBRARY "${_${_PYTHON_PREFIX}_RUNTIME_LIBRARY_DEBUG}")
+ else()
+ set (${_PYTHON_PREFIX}_RUNTIME_LIBRARY "${_PYTHON_PREFIX}_RUNTIME_LIBRARY-NOTFOUND")
+ endif()
+
+ _python_set_library_dirs (${_PYTHON_PREFIX}_LIBRARY_DIRS
+ _${_PYTHON_PREFIX}_LIBRARY_RELEASE
+ _${_PYTHON_PREFIX}_LIBRARY_DEBUG)
+ if (UNIX)
+ if (_${_PYTHON_PREFIX}_LIBRARY_RELEASE MATCHES "${CMAKE_SHARED_LIBRARY_SUFFIX}$")
+ set (${_PYTHON_PREFIX}_RUNTIME_LIBRARY_DIRS ${${_PYTHON_PREFIX}_LIBRARY_DIRS})
+ endif()
+ else()
+ _python_set_library_dirs (${_PYTHON_PREFIX}_RUNTIME_LIBRARY_DIRS
+ _${_PYTHON_PREFIX}_RUNTIME_LIBRARY_RELEASE
+ _${_PYTHON_PREFIX}_RUNTIME_LIBRARY_DEBUG)
+ endif()
+ endif()
+
+ if (_${_PYTHON_PREFIX}_LIBRARY_RELEASE OR _${_PYTHON_PREFIX}_INCLUDE_DIR)
+ if (${_PYTHON_PREFIX}_Interpreter_FOUND OR ${_PYTHON_PREFIX}_Compiler_FOUND)
+ # development environment must be compatible with interpreter/compiler
+ if ("${_${_PYTHON_PREFIX}_VERSION_MAJOR}.${_${_PYTHON_PREFIX}_VERSION_MINOR}" VERSION_EQUAL "${${_PYTHON_PREFIX}_VERSION_MAJOR}.${${_PYTHON_PREFIX}_VERSION_MINOR}"
+ AND "${_${_PYTHON_PREFIX}_INC_VERSION_MAJOR}.${_${_PYTHON_PREFIX}_INC_VERSION_MINOR}" VERSION_EQUAL "${_${_PYTHON_PREFIX}_VERSION_MAJOR}.${_${_PYTHON_PREFIX}_VERSION_MINOR}")
+ _python_set_development_module_found (Module)
+ _python_set_development_module_found (Embed)
+ endif()
+ elseif (${_PYTHON_PREFIX}_VERSION_MAJOR VERSION_EQUAL _${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR
+ AND "${_${_PYTHON_PREFIX}_INC_VERSION_MAJOR}.${_${_PYTHON_PREFIX}_INC_VERSION_MINOR}" VERSION_EQUAL "${_${_PYTHON_PREFIX}_VERSION_MAJOR}.${_${_PYTHON_PREFIX}_VERSION_MINOR}")
+ _python_set_development_module_found (Module)
+ _python_set_development_module_found (Embed)
+ endif()
+ if (DEFINED _${_PYTHON_PREFIX}_FIND_ABI AND
+ (NOT _${_PYTHON_PREFIX}_ABI IN_LIST _${_PYTHON_PREFIX}_ABIFLAGS
+ OR NOT _${_PYTHON_PREFIX}_INC_ABI IN_LIST _${_PYTHON_PREFIX}_ABIFLAGS))
+ set (${_PYTHON_PREFIX}_Development.Module_FOUND FALSE)
+ set (${_PYTHON_PREFIX}_Development.Embed_FOUND FALSE)
+ endif()
+ endif()
+
+ if (( ${_PYTHON_PREFIX}_Development.Module_FOUND
+ AND ${_PYTHON_PREFIX}_Development.Embed_FOUND)
+ OR (NOT "Development.Module" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS
+ AND ${_PYTHON_PREFIX}_Development.Embed_FOUND)
+ OR (NOT "Development.Embed" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS
+ AND ${_PYTHON_PREFIX}_Development.Module_FOUND))
+ unset (_${_PYTHON_PREFIX}_Development_REASON_FAILURE)
+ endif()
+
+ if ("Development" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS
+ AND ${_PYTHON_PREFIX}_Development.Module_FOUND
+ AND ${_PYTHON_PREFIX}_Development.Embed_FOUND)
+ set (${_PYTHON_PREFIX}_Development_FOUND TRUE)
+ endif()
+
+ if ((${_PYTHON_PREFIX}_Development.Module_FOUND
+ OR ${_PYTHON_PREFIX}_Development.Embed_FOUND)
+ AND EXISTS "${_${_PYTHON_PREFIX}_INCLUDE_DIR}/PyPy.h")
+ # retrieve PyPy version
+ file (STRINGS "${_${_PYTHON_PREFIX}_INCLUDE_DIR}/patchlevel.h" ${_PYTHON_PREFIX}_PyPy_VERSION
+ REGEX "^#define[ \t]+PYPY_VERSION[ \t]+\"[^\"]+\"")
+ string (REGEX REPLACE "^#define[ \t]+PYPY_VERSION[ \t]+\"([^\"]+)\".*" "\\1"
+ ${_PYTHON_PREFIX}_PyPy_VERSION "${${_PYTHON_PREFIX}_PyPy_VERSION}")
+ endif()
+
+ unset(${_PYTHON_PREFIX}_LINK_OPTIONS)
+ if (${_PYTHON_PREFIX}_Development.Embed_FOUND AND APPLE
+ AND ${_PYTHON_PREFIX}_LIBRARY_RELEASE MATCHES "${CMAKE_SHARED_LIBRARY_SUFFIX}$")
+ # rpath must be specified if python is part of a framework
+ unset(_${_PYTHON_PREFIX}_is_prefix)
+ foreach (_${_PYTHON_PREFIX}_implementation IN LISTS _${_PYTHON_PREFIX}_FIND_IMPLEMENTATIONS)
+ foreach (_${_PYTHON_PREFIX}_framework IN LISTS _${_PYTHON_PREFIX}_${_${_PYTHON_PREFIX}_implementation}_FRAMEWORKS)
+ if (${_PYTHON_PREFIX}_LIBRARY_RELEASE MATCHES "^${_${_PYTHON_PREFIX}_framework}")
+ get_filename_component (_${_PYTHON_PREFIX}_framework "${_${_PYTHON_PREFIX}_framework}" DIRECTORY)
+ set (${_PYTHON_PREFIX}_LINK_OPTIONS "LINKER:-rpath,${_${_PYTHON_PREFIX}_framework}")
+ break()
+ endif()
+ endforeach()
+ if (_${_PYTHON_PREFIX}_is_prefix)
+ break()
+ endif()
+ endforeach()
+ unset(_${_PYTHON_PREFIX}_implementation)
+ unset(_${_PYTHON_PREFIX}_framework)
+ unset(_${_PYTHON_PREFIX}_is_prefix)
+ endif()
+
+ if (NOT DEFINED ${_PYTHON_PREFIX}_SOABI)
+ _python_get_config_var (${_PYTHON_PREFIX}_SOABI SOABI)
+ endif()
+
+ _python_compute_development_signature (Module)
+ _python_compute_development_signature (Embed)
+
+ # Restore the original find library ordering
+ if (DEFINED _${_PYTHON_PREFIX}_CMAKE_FIND_LIBRARY_SUFFIXES)
+ set (CMAKE_FIND_LIBRARY_SUFFIXES ${_${_PYTHON_PREFIX}_CMAKE_FIND_LIBRARY_SUFFIXES})
+ endif()
+
+ if (${_PYTHON_PREFIX}_ARTIFACTS_INTERACTIVE)
+ if ("LIBRARY" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_ARTIFACTS)
+ set (${_PYTHON_PREFIX}_LIBRARY "${_${_PYTHON_PREFIX}_LIBRARY_RELEASE}" CACHE FILEPATH "${_PYTHON_PREFIX} Library")
+ endif()
+ if ("INCLUDE_DIR" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_ARTIFACTS)
+ set (${_PYTHON_PREFIX}_INCLUDE_DIR "${_${_PYTHON_PREFIX}_INCLUDE_DIR}" CACHE FILEPATH "${_PYTHON_PREFIX} Include Directory")
+ endif()
+ endif()
+
+ _python_mark_as_internal (_${_PYTHON_PREFIX}_LIBRARY_RELEASE
+ _${_PYTHON_PREFIX}_LIBRARY_DEBUG
+ _${_PYTHON_PREFIX}_RUNTIME_LIBRARY_RELEASE
+ _${_PYTHON_PREFIX}_RUNTIME_LIBRARY_DEBUG
+ _${_PYTHON_PREFIX}_INCLUDE_DIR
+ _${_PYTHON_PREFIX}_CONFIG
+ _${_PYTHON_PREFIX}_DEVELOPMENT_MODULE_SIGNATURE
+ _${_PYTHON_PREFIX}_DEVELOPMENT_EMBED_SIGNATURE)
+endif()
+
+if (${_PYTHON_PREFIX}_FIND_REQUIRED_NumPy)
+ list (APPEND _${_PYTHON_PREFIX}_REQUIRED_VARS ${_PYTHON_PREFIX}_NumPy_INCLUDE_DIRS)
+endif()
+if ("NumPy" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS AND ${_PYTHON_PREFIX}_Interpreter_FOUND)
+ list (APPEND _${_PYTHON_PREFIX}_CACHED_VARS _${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR)
+
+ if (DEFINED ${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR
+ AND IS_ABSOLUTE "${${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR}")
+ set (_${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR "${${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR}" CACHE INTERNAL "")
+ elseif (DEFINED _${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR)
+ # compute numpy signature. Depends on interpreter and development signatures
+ string (MD5 __${_PYTHON_PREFIX}_NUMPY_SIGNATURE "${_${_PYTHON_PREFIX}_INTERPRETER_SIGNATURE}:${_${_PYTHON_PREFIX}_DEVELOPMENT_MODULE_SIGNATURE}:${_${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR}")
+ if (NOT __${_PYTHON_PREFIX}_NUMPY_SIGNATURE STREQUAL _${_PYTHON_PREFIX}_NUMPY_SIGNATURE
+ OR NOT EXISTS "${_${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR}")
+ unset (_${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR CACHE)
+ unset (_${_PYTHON_PREFIX}_NUMPY_SIGNATURE CACHE)
+ endif()
+ endif()
+
+ if (NOT _${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR)
+ execute_process(COMMAND ${${_PYTHON_PREFIX}_INTERPRETER_LAUNCHER} "${_${_PYTHON_PREFIX}_EXECUTABLE}" -c
+ "import sys\ntry: import numpy; sys.stdout.write(numpy.get_include())\nexcept:pass\n"
+ RESULT_VARIABLE _${_PYTHON_PREFIX}_RESULT
+ OUTPUT_VARIABLE _${_PYTHON_PREFIX}_NumPy_PATH
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+
+ if (NOT _${_PYTHON_PREFIX}_RESULT)
+ find_path (_${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR
+ NAMES "numpy/arrayobject.h" "numpy/numpyconfig.h"
+ HINTS "${_${_PYTHON_PREFIX}_NumPy_PATH}"
+ NO_DEFAULT_PATH)
+ endif()
+ endif()
+
+ set (${_PYTHON_PREFIX}_NumPy_INCLUDE_DIRS "${_${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR}")
+
+ if(_${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR AND NOT EXISTS "${_${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR}")
+ set (_${_PYTHON_PREFIX}_NumPy_REASON_FAILURE "Cannot find the directory \"${_${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR}\"")
+ set_property (CACHE _${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR PROPERTY VALUE "${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR-NOTFOUND")
+ endif()
+
+ if (_${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR)
+ execute_process (COMMAND ${${_PYTHON_PREFIX}_INTERPRETER_LAUNCHER} "${_${_PYTHON_PREFIX}_EXECUTABLE}" -c
+ "import sys\ntry: import numpy; sys.stdout.write(numpy.__version__)\nexcept:pass\n"
+ RESULT_VARIABLE _${_PYTHON_PREFIX}_RESULT
+ OUTPUT_VARIABLE _${_PYTHON_PREFIX}_NumPy_VERSION)
+ if (NOT _${_PYTHON_PREFIX}_RESULT)
+ set (${_PYTHON_PREFIX}_NumPy_VERSION "${_${_PYTHON_PREFIX}_NumPy_VERSION}")
+ else()
+ unset (${_PYTHON_PREFIX}_NumPy_VERSION)
+ endif()
+
+ # final step: mark NumPy as found only if the Development.Module component was found as well
+ set(${_PYTHON_PREFIX}_NumPy_FOUND ${${_PYTHON_PREFIX}_Development.Module_FOUND})
+ else()
+ set (${_PYTHON_PREFIX}_NumPy_FOUND FALSE)
+ endif()
+
+ if (${_PYTHON_PREFIX}_NumPy_FOUND)
+ unset (_${_PYTHON_PREFIX}_NumPy_REASON_FAILURE)
+
+ # compute and save numpy signature
+ string (MD5 __${_PYTHON_PREFIX}_NUMPY_SIGNATURE "${_${_PYTHON_PREFIX}_INTERPRETER_SIGNATURE}:${_${_PYTHON_PREFIX}_DEVELOPMENT_MODULE_SIGNATURE}:${_${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR}")
+ set (_${_PYTHON_PREFIX}_NUMPY_SIGNATURE "${__${_PYTHON_PREFIX}_NUMPY_SIGNATURE}" CACHE INTERNAL "")
+ else()
+ unset (_${_PYTHON_PREFIX}_NUMPY_SIGNATURE CACHE)
+ endif()
+
+ if (${_PYTHON_PREFIX}_ARTIFACTS_INTERACTIVE)
+ set (${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR "${_${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR}" CACHE FILEPATH "${_PYTHON_PREFIX} NumPy Include Directory")
+ endif()
+
+ _python_mark_as_internal (_${_PYTHON_PREFIX}_NumPy_INCLUDE_DIR
+ _${_PYTHON_PREFIX}_NUMPY_SIGNATURE)
+endif()
+
+# final validation
+if (${_PYTHON_PREFIX}_VERSION_MAJOR AND
+ NOT ${_PYTHON_PREFIX}_VERSION_MAJOR VERSION_EQUAL _${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR)
+ _python_display_failure ("Could NOT find ${_PYTHON_PREFIX}: Found unsuitable major version \"${${_PYTHON_PREFIX}_VERSION_MAJOR}\", but required major version is exact version \"${_${_PYTHON_PREFIX}_REQUIRED_VERSION_MAJOR}\"")
+
+ cmake_policy(POP)
+ return()
+endif()
+
+unset (_${_PYTHON_PREFIX}_REASON_FAILURE)
+foreach (_${_PYTHON_PREFIX}_COMPONENT IN ITEMS Interpreter Compiler Development NumPy)
+ if (_${_PYTHON_PREFIX}_${_${_PYTHON_PREFIX}_COMPONENT}_REASON_FAILURE)
+ string (APPEND _${_PYTHON_PREFIX}_REASON_FAILURE "\n ${_${_PYTHON_PREFIX}_COMPONENT}: ${_${_PYTHON_PREFIX}_${_${_PYTHON_PREFIX}_COMPONENT}_REASON_FAILURE}")
+ unset (_${_PYTHON_PREFIX}_${_${_PYTHON_PREFIX}_COMPONENT}_REASON_FAILURE)
+ endif()
+endforeach()
+
+find_package_handle_standard_args (${_PYTHON_PREFIX}
+ REQUIRED_VARS ${_${_PYTHON_PREFIX}_REQUIRED_VARS}
+ VERSION_VAR ${_PYTHON_PREFIX}_VERSION
+ HANDLE_COMPONENTS)
+
+# Create imported targets and helper functions
+if(_${_PYTHON_PREFIX}_CMAKE_ROLE STREQUAL "PROJECT")
+ if ("Interpreter" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS
+ AND ${_PYTHON_PREFIX}_Interpreter_FOUND
+ AND NOT TARGET ${_PYTHON_PREFIX}::Interpreter)
+ add_executable (${_PYTHON_PREFIX}::Interpreter IMPORTED)
+ set_property (TARGET ${_PYTHON_PREFIX}::Interpreter
+ PROPERTY IMPORTED_LOCATION "${${_PYTHON_PREFIX}_EXECUTABLE}")
+ endif()
+
+ if ("Compiler" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS
+ AND ${_PYTHON_PREFIX}_Compiler_FOUND
+ AND NOT TARGET ${_PYTHON_PREFIX}::Compiler)
+ add_executable (${_PYTHON_PREFIX}::Compiler IMPORTED)
+ set_property (TARGET ${_PYTHON_PREFIX}::Compiler
+ PROPERTY IMPORTED_LOCATION "${${_PYTHON_PREFIX}_COMPILER}")
+ endif()
+
+ if (("Development.Module" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS
+ AND ${_PYTHON_PREFIX}_Development.Module_FOUND)
+ OR ("Development.Embed" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS
+ AND ${_PYTHON_PREFIX}_Development.Embed_FOUND))
+
+ macro (__PYTHON_IMPORT_LIBRARY __name)
+ if (${_PYTHON_PREFIX}_LIBRARY_RELEASE MATCHES "${CMAKE_SHARED_LIBRARY_SUFFIX}$"
+ OR ${_PYTHON_PREFIX}_RUNTIME_LIBRARY_RELEASE)
+ set (_${_PYTHON_PREFIX}_LIBRARY_TYPE SHARED)
+ else()
+ set (_${_PYTHON_PREFIX}_LIBRARY_TYPE STATIC)
+ endif()
+
+ if (NOT TARGET ${__name})
+ add_library (${__name} ${_${_PYTHON_PREFIX}_LIBRARY_TYPE} IMPORTED)
+ endif()
+
+ set_property (TARGET ${__name}
+ PROPERTY INTERFACE_INCLUDE_DIRECTORIES "${${_PYTHON_PREFIX}_INCLUDE_DIRS}")
+
+ if (${_PYTHON_PREFIX}_LIBRARY_RELEASE AND ${_PYTHON_PREFIX}_RUNTIME_LIBRARY_RELEASE)
+ # Systems manage shared libraries in two parts: import and runtime
+ if (${_PYTHON_PREFIX}_LIBRARY_RELEASE AND ${_PYTHON_PREFIX}_LIBRARY_DEBUG)
+ set_property (TARGET ${__name} PROPERTY IMPORTED_CONFIGURATIONS RELEASE DEBUG)
+ set_target_properties (${__name}
+ PROPERTIES IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "C"
+ IMPORTED_IMPLIB_RELEASE "${${_PYTHON_PREFIX}_LIBRARY_RELEASE}"
+ IMPORTED_LOCATION_RELEASE "${${_PYTHON_PREFIX}_RUNTIME_LIBRARY_RELEASE}")
+ set_target_properties (${__name}
+ PROPERTIES IMPORTED_LINK_INTERFACE_LANGUAGES_DEBUG "C"
+ IMPORTED_IMPLIB_DEBUG "${${_PYTHON_PREFIX}_LIBRARY_DEBUG}"
+ IMPORTED_LOCATION_DEBUG "${${_PYTHON_PREFIX}_RUNTIME_LIBRARY_DEBUG}")
+ else()
+ set_target_properties (${__name}
+ PROPERTIES IMPORTED_LINK_INTERFACE_LANGUAGES "C"
+ IMPORTED_IMPLIB "${${_PYTHON_PREFIX}_LIBRARIES}"
+ IMPORTED_LOCATION "${${_PYTHON_PREFIX}_RUNTIME_LIBRARY_RELEASE}")
+ endif()
+ else()
+ if (${_PYTHON_PREFIX}_LIBRARY_RELEASE AND ${_PYTHON_PREFIX}_LIBRARY_DEBUG)
+ set_property (TARGET ${__name} PROPERTY IMPORTED_CONFIGURATIONS RELEASE DEBUG)
+ set_target_properties (${__name}
+ PROPERTIES IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "C"
+ IMPORTED_LOCATION_RELEASE "${${_PYTHON_PREFIX}_LIBRARY_RELEASE}")
+ set_target_properties (${__name}
+ PROPERTIES IMPORTED_LINK_INTERFACE_LANGUAGES_DEBUG "C"
+ IMPORTED_LOCATION_DEBUG "${${_PYTHON_PREFIX}_LIBRARY_DEBUG}")
+ else()
+ set_target_properties (${__name}
+ PROPERTIES IMPORTED_LINK_INTERFACE_LANGUAGES "C"
+ IMPORTED_LOCATION "${${_PYTHON_PREFIX}_LIBRARY_RELEASE}")
+ endif()
+ endif()
+
+ if (_${_PYTHON_PREFIX}_LIBRARY_TYPE STREQUAL "STATIC")
+ # extend link information with dependent libraries
+ _python_get_config_var (_${_PYTHON_PREFIX}_LINK_LIBRARIES LIBS)
+ if (_${_PYTHON_PREFIX}_LINK_LIBRARIES)
+ set_property (TARGET ${__name}
+ PROPERTY INTERFACE_LINK_LIBRARIES ${_${_PYTHON_PREFIX}_LINK_LIBRARIES})
+ endif()
+ endif()
+
+ if (${_PYTHON_PREFIX}_LINK_OPTIONS
+ AND _${_PYTHON_PREFIX}_LIBRARY_TYPE STREQUAL "SHARED")
+ set_property (TARGET ${__name} PROPERTY INTERFACE_LINK_OPTIONS "${${_PYTHON_PREFIX}_LINK_OPTIONS}")
+ endif()
+ endmacro()
+
+ if (${_PYTHON_PREFIX}_Development.Embed_FOUND)
+ __python_import_library (${_PYTHON_PREFIX}::Python)
+ endif()
+
+ if (${_PYTHON_PREFIX}_Development.Module_FOUND)
+ if ("LIBRARY" IN_LIST _${_PYTHON_PREFIX}_FIND_DEVELOPMENT_MODULE_ARTIFACTS)
+ # On Windows/CYGWIN/MSYS, Python::Module is the same as Python::Python
+ # but ALIAS cannot be used because the imported library is not GLOBAL.
+ __python_import_library (${_PYTHON_PREFIX}::Module)
+ else()
+ if (NOT TARGET ${_PYTHON_PREFIX}::Module)
+ add_library (${_PYTHON_PREFIX}::Module INTERFACE IMPORTED)
+ endif()
+ set_property (TARGET ${_PYTHON_PREFIX}::Module
+ PROPERTY INTERFACE_INCLUDE_DIRECTORIES "${${_PYTHON_PREFIX}_INCLUDE_DIRS}")
+
+ # When available, enforce shared library generation with undefined symbols
+ if (APPLE)
+ set_property (TARGET ${_PYTHON_PREFIX}::Module
+ PROPERTY INTERFACE_LINK_OPTIONS "LINKER:-undefined,dynamic_lookup")
+ endif()
+ if (CMAKE_SYSTEM_NAME STREQUAL "SunOS")
+ set_property (TARGET ${_PYTHON_PREFIX}::Module
+ PROPERTY INTERFACE_LINK_OPTIONS "LINKER:-z,nodefs")
+ endif()
+ if (CMAKE_SYSTEM_NAME STREQUAL "AIX")
+ set_property (TARGET ${_PYTHON_PREFIX}::Module
+ PROPERTY INTERFACE_LINK_OPTIONS "LINKER:-b,erok")
+ endif()
+ endif()
+ endif()
+
+ #
+ # PYTHON_ADD_LIBRARY (<name> [STATIC|SHARED|MODULE] src1 src2 ... srcN)
+ # It is used to build modules for python.
+ #
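+ # A minimal usage sketch from a consuming project (the module name and source
+ # file below are illustrative only; the concrete command name depends on the
+ # prefix, e.g. Python3_add_library):
+ #
+ #   find_package (Python3 COMPONENTS Interpreter Development.Module)
+ #   Python3_add_library (spam MODULE WITH_SOABI spam.c)
+ #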
+ function (__${_PYTHON_PREFIX}_ADD_LIBRARY prefix name)
+ cmake_parse_arguments (PARSE_ARGV 2 PYTHON_ADD_LIBRARY "STATIC;SHARED;MODULE;WITH_SOABI" "" "")
+
+ if (PYTHON_ADD_LIBRARY_STATIC)
+ set (type STATIC)
+ elseif (PYTHON_ADD_LIBRARY_SHARED)
+ set (type SHARED)
+ else()
+ set (type MODULE)
+ endif()
+
+ if (type STREQUAL "MODULE" AND NOT TARGET ${prefix}::Module)
+ message (SEND_ERROR "${prefix}_ADD_LIBRARY: dependent target '${prefix}::Module' is not defined.\n Did you miss to request COMPONENT 'Development.Module'?")
+ return()
+ endif()
+ if (NOT type STREQUAL "MODULE" AND NOT TARGET ${prefix}::Python)
+ message (SEND_ERROR "${prefix}_ADD_LIBRARY: dependent target '${prefix}::Python' is not defined.\n Did you miss to request COMPONENT 'Development.Embed'?")
+ return()
+ endif()
+
+ add_library (${name} ${type} ${PYTHON_ADD_LIBRARY_UNPARSED_ARGUMENTS})
+
+ get_property (type TARGET ${name} PROPERTY TYPE)
+
+ if (type STREQUAL "MODULE_LIBRARY")
+ target_link_libraries (${name} PRIVATE ${prefix}::Module)
+ # customize library name to follow module name rules
+ set_property (TARGET ${name} PROPERTY PREFIX "")
+ if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
+ set_property (TARGET ${name} PROPERTY SUFFIX ".pyd")
+ endif()
+
+ if (PYTHON_ADD_LIBRARY_WITH_SOABI AND ${prefix}_SOABI)
+ get_property (suffix TARGET ${name} PROPERTY SUFFIX)
+ if (NOT suffix)
+ set (suffix "${CMAKE_SHARED_MODULE_SUFFIX}")
+ endif()
+ set_property (TARGET ${name} PROPERTY SUFFIX ".${${prefix}_SOABI}${suffix}")
+ endif()
+ else()
+ if (PYTHON_ADD_LIBRARY_WITH_SOABI)
+ message (AUTHOR_WARNING "Find${prefix}: Option `WITH_SOABI` is only supported for `MODULE` library type.")
+ endif()
+ target_link_libraries (${name} PRIVATE ${prefix}::Python)
+ endif()
+ endfunction()
+ endif()
+
+ if ("NumPy" IN_LIST ${_PYTHON_PREFIX}_FIND_COMPONENTS AND ${_PYTHON_PREFIX}_NumPy_FOUND
+ AND NOT TARGET ${_PYTHON_PREFIX}::NumPy AND TARGET ${_PYTHON_PREFIX}::Module)
+ add_library (${_PYTHON_PREFIX}::NumPy INTERFACE IMPORTED)
+ set_property (TARGET ${_PYTHON_PREFIX}::NumPy
+ PROPERTY INTERFACE_INCLUDE_DIRECTORIES "${${_PYTHON_PREFIX}_NumPy_INCLUDE_DIRS}")
+ target_link_libraries (${_PYTHON_PREFIX}::NumPy INTERFACE ${_PYTHON_PREFIX}::Module)
+ endif()
+endif()
+
+# final clean-up
+
+# Restore CMAKE_FIND_APPBUNDLE
+if (DEFINED _${_PYTHON_PREFIX}_CMAKE_FIND_APPBUNDLE)
+ set (CMAKE_FIND_APPBUNDLE ${_${_PYTHON_PREFIX}_CMAKE_FIND_APPBUNDLE})
+ unset (_${_PYTHON_PREFIX}_CMAKE_FIND_APPBUNDLE)
+else()
+ unset (CMAKE_FIND_APPBUNDLE)
+endif()
+# Restore CMAKE_FIND_FRAMEWORK
+if (DEFINED _${_PYTHON_PREFIX}_CMAKE_FIND_FRAMEWORK)
+ set (CMAKE_FIND_FRAMEWORK ${_${_PYTHON_PREFIX}_CMAKE_FIND_FRAMEWORK})
+ unset (_${_PYTHON_PREFIX}_CMAKE_FIND_FRAMEWORK)
+else()
+ unset (CMAKE_FIND_FRAMEWORK)
+endif()
+
+cmake_policy(POP)
--- /dev/null
+#[=======================================================================[.rst:
+FindQuadMath
+------------
+
+Find the GCC Quad-Precision library
+
+This module checks whether the compiler in use has built-in support for QuadMath
+by compiling a small source file.
+
+Imported Targets
+^^^^^^^^^^^^^^^^
+
+This module provides the following imported targets, if found:
+
+``QuadMath::QuadMath``
+ Library to link against if QuadMath should be used.
+
+Result Variables
+^^^^^^^^^^^^^^^^
+
+This will define the following variables:
+
+``QuadMath_FOUND``
+ True if the Quad-Precision library was found.
+
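+Example usage
+^^^^^^^^^^^^^
+
+A minimal sketch of how this module is typically consumed; the target name
+``my_app`` is hypothetical:
+
+.. code-block:: cmake
+
+  find_package(QuadMath)
+  if(QuadMath_FOUND)
+    target_link_libraries(my_app PRIVATE QuadMath::QuadMath)
+  endif()
+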
+#]=======================================================================]
+
+# Add a feature summary for this package
+include(FeatureSummary)
+set_package_properties(QuadMath PROPERTIES
+ DESCRIPTION "GCC Quad-Precision Math Library"
+ URL "https://gcc.gnu.org/onlinedocs/libquadmath"
+)
+
+# Check if QuadMath support is built into the compiler
+include(CheckCXXSourceCompiles)
+include(CMakePushCheckState)
+cmake_push_check_state()
+set(CMAKE_REQUIRED_LIBRARIES quadmath)
+if(${CMAKE_CXX_COMPILER_ID} STREQUAL GNU)
+ set(CMAKE_REQUIRED_FLAGS "-fext-numeric-literals")
+endif()
+check_cxx_source_compiles("
+#include <quadmath.h>
+
+int main ()
+{
+ __float128 r = 1.0q;
+ r = strtoflt128(\"1.2345678\", NULL);
+ return 0;
+}" QuadMath_COMPILES)
+cmake_pop_check_state() # Reset CMAKE_REQUIRED_XXX variables
+
+if(QuadMath_COMPILES)
+ # Use additional variable for better report message
+ set(QuadMath_VAR "(Supported by compiler)")
+endif()
+
+# Report that package was found
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(QuadMath
+ DEFAULT_MSG
+ QuadMath_VAR QuadMath_COMPILES
+)
+
+# add imported target for quadmath
+if(QuadMath_FOUND AND NOT TARGET QuadMath::QuadMath)
+ # Compiler supports QuadMath: Add appropriate linker flag
+ add_library(QuadMath::QuadMath INTERFACE IMPORTED)
+ target_link_libraries(QuadMath::QuadMath INTERFACE quadmath)
+
+ target_compile_definitions(QuadMath::QuadMath INTERFACE
+ _GLIBCXX_USE_FLOAT128
+ )
+ target_compile_options(QuadMath::QuadMath INTERFACE
+ $<$<CXX_COMPILER_ID:GNU>:-fext-numeric-literals>
+ )
+endif()
--- /dev/null
+# .. cmake_module::
+#
+# Find Sphinx - the Python documentation tool
+#
+# You may set the following variables to modify the
+# behaviour of this module:
+#
+# :ref:`SPHINX_ROOT`
+# the path to look for sphinx with the highest priority
+#
+# The following variables are set by this module:
+#
+# :code:`SPHINX_FOUND`
+# whether Sphinx was found
+#
+# :code:`SPHINX_EXECUTABLE`
+# the path to the sphinx-build executable
+#
+# .. cmake_variable:: SPHINX_ROOT
+#
+# You may set this variable to have :ref:`FindSphinx` look
+# for the :code:`sphinx-build` executable in the given path
+# before inspecting system paths.
+#
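+# A minimal usage sketch (the target name :code:`sphinx_html` and the chosen
+# source and output directories are purely illustrative):
+#
+# .. code-block:: cmake
+#
+#    find_package(Sphinx)
+#    if(SPHINX_FOUND)
+#      add_custom_target(sphinx_html
+#        COMMAND ${SPHINX_EXECUTABLE} -b html
+#                ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/html)
+#    endif()
+#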
+
+#TODO export version.
+
+find_program(SPHINX_EXECUTABLE
+ NAMES sphinx-build
+ PATHS ${SPHINX_ROOT}
+ NO_DEFAULT_PATH)
+
+find_program(SPHINX_EXECUTABLE
+ NAMES sphinx-build)
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(
+ "Sphinx"
+ DEFAULT_MSG
+ SPHINX_EXECUTABLE
+)
--- /dev/null
+#[=======================================================================[.rst:
+FindSuiteSparse
+---------------
+
+Find the SuiteSparse libraries like UMFPACK or SPQR.
+
+Use this module by invoking find_package with the form:
+
+ find_package(SuiteSparse
+ [<version>] [EXACT] # Minimum or EXACT version e.g. 5.1
+ [REQUIRED] # Fail with error if SuiteSparse is not found
+ [COMPONENTS <libs>...] # SuiteSparse libraries by their canonical name
+ # e.g. "UMFPACK" or "SPQR"
+ [OPTIONAL_COMPONENTS <libs>...]
+ # Optional SuiteSparse libraries by their canonical name
+ ) # e.g. "UMFPACK" or "SPQR"
+
+Components
+^^^^^^^^^^
+
+The SuiteSparse module allows searching for the following components:
+
+``CHOLMOD``
+ Supernodal Cholesky factorization.
+``CSparse`` and ``CXSparse``
+ A Concise Sparse Matrix package.
+``GraphBLAS``
+ Graph algorithms and primitives using semiring algebra. (SuiteSparse >= 5.6)
+``KLU`` and ``BTF``
+ Sparse LU factorization, well-suited for circuit simulation.
+``LDL``
+ A sparse LDL' factorization and solve package.
+``Mongoose``
+ A graph partitioning library. (SuiteSparse >= 5.5)
+``SPQR``
+ Multifrontal QR factorization.
+``UMFPACK``
+ Multifrontal LU factorization.
+
+And ordering methods: ``AMD``, ``CAMD``, ``COLAMD``, and ``CCOLAMD``.
+
+Imported Targets
+^^^^^^^^^^^^^^^^
+
+This module provides the following imported targets, if found:
+
+``SuiteSparse::SuiteSparse``
+ A meta library including all the requested optional or required components.
+``SuiteSparse::<COMPONENT>``
+ Library and include directories for the found ``<COMPONENT>``.
+
+Result Variables
+^^^^^^^^^^^^^^^^
+
+This will define the following variables:
+
+``SuiteSparse_FOUND``
+ True if all the (required) components are found
+``SuiteSparse_<COMPONENT>_FOUND``
+ True if a searched ``<COMPONENT>`` is found
+
+Input and Cache Variables
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You may set the following variables to modify the behaviour of
+this module:
+
+``SuiteSparse_ROOT``
+ The root directory of the SuiteSparse installation, containing
+ subdirectories :code:`include/` and :code:`lib/` including the
+ header files and libraries of SuiteSparse and its components,
+ respectively.
+``SUITESPARSE_INCLUDE_DIR``
+ The directory containing ``SuiteSparse_config.h``.
+``SUITESPARSE_CONFIG_LIB``
+ The path to the suitesparseconfig library.
+
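+Example usage
+^^^^^^^^^^^^^
+
+A minimal sketch; the executable target ``solver`` is hypothetical:
+
+.. code-block:: cmake
+
+  find_package(SuiteSparse COMPONENTS UMFPACK)
+  if(SuiteSparse_UMFPACK_FOUND)
+    target_link_libraries(solver PRIVATE SuiteSparse::UMFPACK)
+  endif()
+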
+#]=======================================================================]
+
+# text for feature summary
+include(FeatureSummary)
+set_package_properties("SuiteSparse" PROPERTIES
+ DESCRIPTION "A suite of sparse matrix software"
+ URL "http://faculty.cse.tamu.edu/davis/suitesparse.html"
+)
+
+# find package dependencies first
+include(CMakeFindDependencyMacro)
+find_package(LAPACK QUIET)
+find_package(BLAS QUIET)
+
+# list of possible component names
+set(SUITESPARSE_COMPONENTS
+ "AMD" "BTF" "CAMD" "CCOLAMD" "CHOLMOD" "COLAMD"
+ "CSparse" "CXSparse" "KLU" "LDL" "SPQR" "UMFPACK")
+
+# Define required and optional component dependencies
+set(SUITESPARSE_CHOLMOD_REQUIRED_DEPENDENCIES "AMD" "COLAMD" "CCOLAMD")
+set(SUITESPARSE_CHOLMOD_REQUIRES_BLAS TRUE)
+set(SUITESPARSE_CHOLMOD_REQUIRES_LAPACK TRUE)
+set(SUITESPARSE_KLU_REQUIRED_DEPENDENCIES "AMD" "COLAMD" "BTF")
+set(SUITESPARSE_KLU_OPTIONAL_DEPENDENCIES "CHOLMOD" "CAMD" "CCOLAMD")
+set(SUITESPARSE_SPQR_REQUIRED_DEPENDENCIES "CHOLMOD" "AMD" "COLAMD")
+set(SUITESPARSE_SPQR_REQUIRES_BLAS TRUE)
+set(SUITESPARSE_SPQR_REQUIRES_LAPACK TRUE)
+set(SUITESPARSE_UMFPACK_REQUIRED_DEPENDENCIES "AMD")
+set(SUITESPARSE_UMFPACK_OPTIONAL_DEPENDENCIES "CHOLMOD" "CAMD" "CCOLAMD" "COLAMD")
+set(SUITESPARSE_UMFPACK_REQUIRES_BLAS TRUE)
+
+# look for library suitesparseconfig
+find_library(SUITESPARSE_CONFIG_LIB "suitesparseconfig"
+ PATH_SUFFIXES "SuiteSparse_config"
+)
+# look for header file SuiteSparse_config.h
+find_path(SUITESPARSE_INCLUDE_DIR "SuiteSparse_config.h"
+ PATH_SUFFIXES "suitesparse" "include" "SuiteSparse_config"
+)
+
+get_filename_component(SUITESPARSE_LIB_DIR ${SUITESPARSE_CONFIG_LIB} DIRECTORY)
+mark_as_advanced(SUITESPARSE_INCLUDE_DIR SUITESPARSE_CONFIG_LIB)
+
+foreach(_component ${SUITESPARSE_COMPONENTS})
+ string(TOLOWER ${_component} _componentLower)
+
+ # look for library of the component
+ find_library(${_component}_LIBRARY "${_componentLower}"
+ HINTS ${SUITESPARSE_LIB_DIR}
+ PATH_SUFFIXES "${_component}/Lib"
+ )
+ # look for header file of the component
+ find_path(${_component}_INCLUDE_DIR "${_componentLower}.h"
+ HINTS ${SUITESPARSE_INCLUDE_DIR}
+ PATH_SUFFIXES "suitesparse" "include" "${_component}/Include"
+ )
+
+ mark_as_advanced(${_component}_INCLUDE_DIR ${_component}_LIBRARY)
+endforeach()
+
+# Look for the headers of components whose header file names differ from the pattern above
+find_path(SPQR_INCLUDE_DIR "SuiteSparseQR.hpp"
+ HINTS ${SUITESPARSE_INCLUDE_DIR}
+ PATH_SUFFIXES "suitesparse" "include" "SPQR/Include"
+)
+find_path(Mongoose_INCLUDE_DIR "Mongoose.hpp"
+ HINTS ${SUITESPARSE_INCLUDE_DIR}
+ PATH_SUFFIXES "suitesparse" "include" "Mongoose/Include"
+)
+find_path(GraphBLAS_INCLUDE_DIR "GraphBLAS.h"
+ HINTS ${SUITESPARSE_INCLUDE_DIR}
+ PATH_SUFFIXES "suitesparse" "include" "GraphBLAS/Include"
+)
+
+# check version of SuiteSparse
+find_file(SUITESPARSE_CONFIG_FILE "SuiteSparse_config.h"
+ HINTS ${SUITESPARSE_INCLUDE_DIR}
+ NO_DEFAULT_PATH)
+if(SUITESPARSE_CONFIG_FILE)
+ file(READ "${SUITESPARSE_CONFIG_FILE}" suitesparseconfig)
+ string(REGEX REPLACE ".*#define SUITESPARSE_MAIN_VERSION[ ]+([0-9]+).*" "\\1"
+ SUITESPARSE_MAJOR_VERSION "${suitesparseconfig}")
+ string(REGEX REPLACE ".*#define SUITESPARSE_SUB_VERSION[ ]+([0-9]+).*" "\\1"
+ SUITESPARSE_MINOR_VERSION "${suitesparseconfig}")
+ string(REGEX REPLACE ".*#define SUITESPARSE_SUBSUB_VERSION[ ]+([0-9]+).*" "\\1"
+ SUITESPARSE_PREFIX_VERSION "${suitesparseconfig}")
+ if(SUITESPARSE_MAJOR_VERSION GREATER_EQUAL 0)
+ set(SuiteSparse_VERSION "${SUITESPARSE_MAJOR_VERSION}")
+ endif()
+ if (SUITESPARSE_MINOR_VERSION GREATER_EQUAL 0)
+ set(SuiteSparse_VERSION "${SuiteSparse_VERSION}.${SUITESPARSE_MINOR_VERSION}")
+ endif()
+ if (SUITESPARSE_PREFIX_VERSION GREATER_EQUAL 0)
+ set(SuiteSparse_VERSION "${SuiteSparse_VERSION}.${SUITESPARSE_PREFIX_VERSION}")
+ endif()
+endif()
+unset(SUITESPARSE_CONFIG_FILE CACHE)
+
+
+# check whether everything was found
+foreach(_component ${SUITESPARSE_COMPONENTS})
+ if(${_component}_LIBRARY AND ${_component}_INCLUDE_DIR)
+ set(SuiteSparse_${_component}_FOUND TRUE)
+ else()
+ set(SuiteSparse_${_component}_FOUND FALSE)
+ endif()
+endforeach(_component)
+
+# test for required dependencies
+foreach(_component ${SUITESPARSE_COMPONENTS})
+ foreach(_dependency ${SUITESPARSE_${_component}_REQUIRED_DEPENDENCIES})
+ if(NOT SuiteSparse_${_dependency}_FOUND)
+ set(SuiteSparse_${_component}_FOUND FALSE)
+ endif()
+ endforeach(_dependency)
+endforeach(_component)
+
+# SPQR requires SuiteSparse >= 4.3
+if(SPQR_LIBRARY)
+ if(SuiteSparse_VERSION VERSION_LESS "4.3")
+ set(SuiteSparse_SPQR_FOUND FALSE)
+ endif()
+endif()
+
+
+# behave like a CMake module is supposed to behave
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args("SuiteSparse"
+ REQUIRED_VARS
+ SUITESPARSE_CONFIG_LIB SUITESPARSE_INCLUDE_DIR BLAS_FOUND
+ VERSION_VAR
+ SuiteSparse_VERSION
+ HANDLE_COMPONENTS
+)
+
+# if both headers and library for all required components are found,
+# then create imported targets for all components
+if(SuiteSparse_FOUND)
+ if(NOT TARGET SuiteSparse::SuiteSparse_config)
+ add_library(SuiteSparse::SuiteSparse_config UNKNOWN IMPORTED)
+ set_target_properties(SuiteSparse::SuiteSparse_config PROPERTIES
+ IMPORTED_LOCATION ${SUITESPARSE_CONFIG_LIB}
+ INTERFACE_INCLUDE_DIRECTORIES ${SUITESPARSE_INCLUDE_DIR}
+ )
+ endif()
+
+ # Define component imported-targets
+ foreach(_component ${SUITESPARSE_COMPONENTS})
+ if(SuiteSparse_${_component}_FOUND AND NOT TARGET SuiteSparse::${_component})
+ add_library(SuiteSparse::${_component} UNKNOWN IMPORTED)
+ set_target_properties(SuiteSparse::${_component} PROPERTIES
+ IMPORTED_LOCATION ${${_component}_LIBRARY}
+ INTERFACE_INCLUDE_DIRECTORIES ${${_component}_INCLUDE_DIR}
+ INTERFACE_LINK_LIBRARIES SuiteSparse::SuiteSparse_config
+ )
+ endif()
+ endforeach(_component)
+
+ foreach(_component ${SUITESPARSE_COMPONENTS})
+ # Link required dependencies
+ foreach(_dependency ${SUITESPARSE_${_component}_REQUIRED_DEPENDENCIES})
+ target_link_libraries(SuiteSparse::${_component}
+ INTERFACE SuiteSparse::${_dependency})
+ endforeach(_dependency)
+
+ # Link found optional dependencies
+ foreach(_dependency ${SUITESPARSE_${_component}_OPTIONAL_DEPENDENCIES})
+ if(SuiteSparse_${_dependency}_FOUND)
+ target_link_libraries(SuiteSparse::${_component}
+ INTERFACE SuiteSparse::${_dependency})
+ endif()
+ endforeach(_dependency)
+
+ # Link BLAS library
+ if(SUITESPARSE_${_component}_REQUIRES_BLAS)
+ if(TARGET BLAS::BLAS)
+ target_link_libraries(SuiteSparse::${_component}
+ INTERFACE BLAS::BLAS)
+ else()
+ target_link_libraries(SuiteSparse::${_component}
+ INTERFACE ${BLAS_LINKER_FLAGS} ${BLAS_LIBRARIES})
+ endif()
+ endif()
+
+ # Link LAPACK library
+ if(SUITESPARSE_${_component}_REQUIRES_LAPACK)
+ if(TARGET LAPACK::LAPACK)
+ target_link_libraries(SuiteSparse::${_component}
+ INTERFACE LAPACK::LAPACK)
+ else()
+ target_link_libraries(SuiteSparse::${_component}
+ INTERFACE ${LAPACK_LINKER_FLAGS} ${LAPACK_LIBRARIES})
+ endif()
+ endif()
+ endforeach(_component)
+
+ # Combine all requested components to an imported target
+ if(NOT TARGET SuiteSparse::SuiteSparse)
+ add_library(SuiteSparse::SuiteSparse INTERFACE IMPORTED)
+ target_link_libraries(SuiteSparse::SuiteSparse
+ INTERFACE SuiteSparse::SuiteSparse_config)
+ endif()
+ foreach(_component ${SuiteSparse_FIND_COMPONENTS})
+ if(SuiteSparse_${_component}_FOUND)
+ set(HAVE_SUITESPARSE_${_component} TRUE)
+ target_link_libraries(SuiteSparse::SuiteSparse
+ INTERFACE SuiteSparse::${_component})
+ endif()
+ endforeach(_component)
+endif()
--- /dev/null
+#[=======================================================================[.rst:
+FindTBB
+-------
+
+Finds the Threading Building Blocks (TBB) library.
+
+This is a fallback implementation for the case that the TBB library does not
+itself provide a corresponding TBBConfig.cmake file.
+
+Imported Targets
+^^^^^^^^^^^^^^^^
+
+This module provides the following imported targets, if found:
+
+``TBB::tbb``
+ Imported library to link against if TBB should be used.
+
+Result Variables
+^^^^^^^^^^^^^^^^
+
+This will define the following variables:
+
+``TBB_FOUND``
+ True if the TBB library was found.
+
+Finding the TBB library
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Two strategies are implemented for finding the TBB library:
+
+1. Searching for the TBB cmake config file, typically named
+ ``TBBConfig.cmake``. In recent TBB versions, this file can be
+ created using a script provided by TBB itself. Simply set the
+ variable ``TBB_DIR`` to the directory containing the config file
+ in order to find TBB.
+
+2. Using pkg-config to configure TBB. For this, the ``tbb.pc`` file must
+ be found. Several distributions provide this file directly. To point
+ pkg-config to the location of that file, set the environment variable
+ ``PKG_CONFIG_PATH`` to include the directory containing the ``.pc`` file,
+ or add this path to ``CMAKE_PREFIX_PATH``.
+
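+Example usage
+^^^^^^^^^^^^^
+
+A minimal sketch; the target ``my_app`` is hypothetical:
+
+.. code-block:: cmake
+
+  find_package(TBB)
+  if(TBB_FOUND)
+    target_link_libraries(my_app PRIVATE TBB::tbb)
+  endif()
+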
+#]=======================================================================]
+
+
+# text for feature summary
+include(FeatureSummary)
+set_package_properties("TBB" PROPERTIES
+ DESCRIPTION "Intel's Threading Building Blocks"
+)
+
+# first, try to find TBB's CMake configuration
+find_package(TBB ${TBB_FIND_VERSION} QUIET CONFIG)
+if(TBB_FOUND AND TARGET TBB::tbb)
+ message(STATUS "Found TBB: using configuration from TBB_DIR=${TBB_DIR} (found version \"${TBB_VERSION}\")")
+ return()
+endif()
+
+# Add a backport of CMake's FindPkgConfig module
+if(${CMAKE_VERSION} VERSION_LESS "3.19.4")
+ list(INSERT CMAKE_MODULE_PATH 0 "${CMAKE_CURRENT_LIST_DIR}/FindPkgConfig")
+endif()
+
+# second, try to find TBB's pkg-config file
+find_package(PkgConfig)
+if(PkgConfig_FOUND)
+ if(TBB_FIND_VERSION)
+ pkg_check_modules(PkgConfigTBB tbb>=${TBB_FIND_VERSION} QUIET IMPORTED_TARGET GLOBAL)
+ else()
+ pkg_check_modules(PkgConfigTBB tbb QUIET IMPORTED_TARGET GLOBAL)
+ endif()
+endif()
+
+# check whether the static library was found
+if(PkgConfigTBB_STATIC_FOUND)
+ set(_tbb PkgConfigTBB_STATIC)
+else()
+ set(_tbb PkgConfigTBB)
+endif()
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args("TBB"
+ REQUIRED_VARS
+ ${_tbb}_LINK_LIBRARIES ${_tbb}_FOUND PkgConfig_FOUND
+ VERSION_VAR
+ ${_tbb}_VERSION
+ FAIL_MESSAGE "Could NOT find TBB (set TBB_DIR to path containing TBBConfig.cmake or set PKG_CONFIG_PATH to include the location of the tbb.pc file)"
+)
+
+if(${_tbb}_FOUND AND NOT TARGET TBB::tbb)
+ add_library(TBB::tbb ALIAS PkgConfig::PkgConfigTBB)
+endif()
--- /dev/null
+# .. cmake_variable:: ENABLE_HEADERCHECK
+#
+# Set this variable to TRUE if you want to use the CMake
+# reimplementation of the old autotools feature :code:`make headercheck`.
+# There have been a couple of issues with this implementation in
+# the past, so it is deactivated by default.
+#
+include_guard(GLOBAL)
+
+# sets up a global property with the names of all header files
+# in the module and a global target depending on all checks
+macro(setup_headercheck)
+ #glob for headers
+ file(GLOB_RECURSE all_headers "*.hh")
+ # strip hidden files
+ string(REGEX REPLACE "[^;]*/\\.[^;/]*\\.hh;?" "" headers "${all_headers}")
+ set_property(GLOBAL PROPERTY headercheck_list ${headers})
+
+ #define headercheck target
+ dune_module_path(MODULE dune-common RESULT scriptdir SCRIPT_DIR)
+ if(NOT TARGET headercheck)
+ add_custom_target(headercheck ${CMAKE_COMMAND}
+ -DENABLE_HEADERCHECK=${ENABLE_HEADERCHECK}
+ -P ${scriptdir}/FinalizeHeadercheck.cmake
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+ endif()
+endmacro(setup_headercheck)
+
+# these macros are used to exclude headers from make headercheck
+# call this from a CMakeLists.txt file with a list of headers in that directory
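+# A hypothetical example from a subdirectory's CMakeLists.txt (header names
+# are illustrative only):
+#   exclude_from_headercheck(legacyheader.hh otherheader.hh)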
+macro(exclude_from_headercheck)
+ #make this robust to argument being passed with or without ""
+ string(REGEX REPLACE "[\ \n]+([^\ ])" ";\\1" list ${ARGV0})
+ set(list "${list};${ARGV}")
+ get_property(headerlist GLOBAL PROPERTY headercheck_list)
+ foreach(item ${list})
+ list(REMOVE_ITEM headerlist "${CMAKE_CURRENT_SOURCE_DIR}/${item}")
+ endforeach()
+ set_property(GLOBAL PROPERTY headercheck_list ${headerlist})
+endmacro(exclude_from_headercheck)
+
+macro(exclude_dir_from_headercheck)
+ file(GLOB list RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "*.hh")
+ exclude_from_headercheck(${list})
+endmacro(exclude_dir_from_headercheck)
+
+macro(exclude_subdir_from_headercheck DIRNAME)
+ file(GLOB_RECURSE exlist "${CMAKE_CURRENT_SOURCE_DIR}/${DIRNAME}/*.hh")
+ get_property(headerlist GLOBAL PROPERTY headercheck_list)
+ foreach(item ${exlist})
+ list(REMOVE_ITEM headerlist "${item}")
+ endforeach()
+ set_property(GLOBAL PROPERTY headercheck_list ${headerlist})
+endmacro(exclude_subdir_from_headercheck)
+
+macro(exclude_all_but_from_headercheck)
+ file(GLOB excllist RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "*.hh")
+ #make this robust to argument being passed with or without ""
+ string(REGEX REPLACE "[\ \n]+([^\ \n])" ";\\1" list ${ARGV0})
+ set(list "${list};${ARGV}")
+ foreach(item ${list})
+ list(REMOVE_ITEM excllist ${item})
+ endforeach()
+ exclude_from_headercheck(${excllist})
+endmacro(exclude_all_but_from_headercheck)
+
+# configure all headerchecks
+macro(finalize_headercheck)
+ if(ENABLE_HEADERCHECK)
+ get_property(headerlist GLOBAL PROPERTY headercheck_list)
+ foreach(header ${headerlist})
+ #do some name conversion
+ string(REGEX REPLACE ".*/([^/]*)" "\\1" simple ${header})
+ string(REPLACE ${PROJECT_SOURCE_DIR} "" rel ${header})
+ string(REGEX REPLACE "(.*)/[^/]*" "\\1" relpath ${rel})
+ string(REGEX REPLACE "/" "_" targname ${rel})
+
+ #generate the headercheck .cc file
+ file(WRITE ${CMAKE_BINARY_DIR}/headercheck/${rel}.cc "#ifdef HAVE_CONFIG_H\n#include<config.h>\n#endif\n#include<${simple}>\n#include<${simple}>\nint main(){return 0;}")
+
+ # add a target for the check of the current header; this is implemented as a library
+ # to prevent CMake from automatically trying to link the target. The functionality
+ # of the macro try_compile() is unfortunately not available because it is not scriptable.
+ add_library(headercheck_${targname} STATIC EXCLUDE_FROM_ALL
+ ${CMAKE_BINARY_DIR}/headercheck/${rel}.cc)
+ add_dependencies(headercheck headercheck_${targname})
+
+ #add PKG_ALL_FLAGS and the directory where the header is located
+ set_property(TARGET headercheck_${targname}
+ APPEND_STRING PROPERTY COMPILE_FLAGS "-DHEADERCHECK -I${PROJECT_SOURCE_DIR}${relpath} -I${CMAKE_BINARY_DIR}")
+ set_property(TARGET headercheck_${targname} PROPERTY ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/headercheck/${relpath}")
+ add_dune_all_flags(headercheck_${targname})
+ unset(headercheck_${targname}_LIB_DEPENDS CACHE)
+ endforeach(header ${headerlist})
+ endif()
+endmacro(finalize_headercheck)
--- /dev/null
+# check whether the user wants to overload compile flags upon calling make
+#
+# Provides the following macros:
+#
+# initialize_compiler_script() : needs to be called before further flags are added to CMAKE_CXX_FLAGS
+# finalize_compiler_script() : needs to be called at the end of the cmake macros, e.g. in finalize_dune_project
+#
+# Those two macro calls are hooked into dune_project/finalize_dune_project.
+#
+# .. cmake_variable:: ALLOW_CXXFLAGS_OVERWRITE
+#
+# Setting this option will allow you to overload preprocessor definitions from
+# the command line, as was naturally possible with the autotools build system.
+# This feature only works with a :code:`Unix Makefiles` based generator. You can
+# use it as:
+#
+# :code:`make CXXFLAGS="your flags" GRIDTYPE="grid type"`
+#
+# :code:`GRIDTYPE` can be anything defined in :code:`config.h` via the :ref:`dune_define_gridtype` macro from dune-grid.
+# Furthermore, any preprocessor variable of the form :code:`-DVAR=VALUE` can be overloaded on the command line.
+#
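+# A hypothetical session, assuming the option was enabled at configure time
+# with :code:`cmake -DALLOW_CXXFLAGS_OVERWRITE=ON`:
+#
+# :code:`make CXXFLAGS="-O3 -DNDEBUG" GRIDTYPE="YASPGRID"`
+#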
+# .. note::
+# If you don't know what this is or what it's good for, don't use it.
+#
+include_guard(GLOBAL)
+
+option(ALLOW_CXXFLAGS_OVERWRITE "Allow overwriting of CXXFLAGS on the make command line" OFF)
+option(ALLOW_CFLAGS_OVERWRITE "Allow overwriting of CFLAGS on the make command line" OFF)
+
+set(CXX_COMPILER_SCRIPT "${CMAKE_BINARY_DIR}/CXX_compiler.sh" )
+set(C_COMPILER_SCRIPT "${CMAKE_BINARY_DIR}/C_compiler.sh" )
+
+macro(find_extended_unix_commands)
+ include(FindUnixCommands)
+ set(FLAGSNAMES "ALLOW_CXXFLAGS_OVERWRITE and/or ALLOW_CFLAGS_OVERWRITE")
+ find_program (GREP_PROGRAM grep)
+ if(NOT GREP_PROGRAM)
+ message( SEND_ERROR "grep not found, please disable ${FLAGSNAMES}")
+ endif()
+ find_program (SED_PROGRAM sed)
+ if(NOT SED_PROGRAM)
+ message( SEND_ERROR "sed not found, please disable ${FLAGSNAMES}")
+ endif()
+ find_program (CUT_PROGRAM cut)
+ if(NOT CUT_PROGRAM)
+ message( SEND_ERROR "cut not found, please disable ${FLAGSNAMES}")
+ endif()
+ find_program (ENV_PROGRAM env)
+ if(NOT ENV_PROGRAM)
+ message( SEND_ERROR "env not found, please disable ${FLAGSNAMES}")
+ endif()
+ find_program (ECHO_PROGRAM echo)
+ if(NOT ECHO_PROGRAM)
+ message( SEND_ERROR "echo not found, please disable ${FLAGSNAMES}")
+ endif()
+ find_program (CHMOD_PROGRAM chmod)
+ if(NOT CHMOD_PROGRAM)
+ message( SEND_ERROR "chmod not found, please disable ${FLAGSNAMES}")
+ endif()
+ mark_as_advanced(GREP_PROGRAM)
+ mark_as_advanced(SED_PROGRAM)
+ mark_as_advanced(CUT_PROGRAM)
+ mark_as_advanced(ENV_PROGRAM)
+ mark_as_advanced(ECHO_PROGRAM)
+ mark_as_advanced(CHMOD_PROGRAM)
+endmacro(find_extended_unix_commands)
+
+# init compiler script and store CXX flags
+macro(initialize_compiler_script)
+ if(ALLOW_CXXFLAGS_OVERWRITE AND (${CMAKE_GENERATOR} MATCHES ".*Unix Makefiles.*"))
+ # check for unix commands necessary
+ find_extended_unix_commands()
+ # set CXXFLAGS as environment variable
+ set( DEFAULT_CXXFLAGS ${CMAKE_CXX_FLAGS} CACHE STRING "default CXX flags")
+ set( CMAKE_CXX_FLAGS "" )
+ set( DEFAULT_CXX_COMPILER ${CMAKE_CXX_COMPILER} )
+ set( CXX_COMPILER_SCRIPT_FILE "#!${BASH}\nexec ${CMAKE_CXX_COMPILER} \"\$@\"")
+ file(WRITE ${CXX_COMPILER_SCRIPT} "${CXX_COMPILER_SCRIPT_FILE}")
+ execute_process(COMMAND ${CHMOD_PROGRAM} 755 ${CXX_COMPILER_SCRIPT})
+ set(CMAKE_CXX_COMPILER ${CXX_COMPILER_SCRIPT})
+ endif()
+ if(ALLOW_CFLAGS_OVERWRITE AND (${CMAKE_GENERATOR} MATCHES ".*Unix Makefiles.*"))
+ # check for unix commands necessary
+ find_extended_unix_commands()
+ # set CFLAGS as environment variable
+ set( DEFAULT_CFLAGS ${CMAKE_C_FLAGS} CACHE STRING "default C flags")
+ set( CMAKE_C_FLAGS "" )
+ set( DEFAULT_C_COMPILER ${CMAKE_C_COMPILER} )
+ set( C_COMPILER_SCRIPT_FILE "#!${BASH}\nexec ${CMAKE_C_COMPILER} \"\$@\"")
+ file(WRITE ${C_COMPILER_SCRIPT} "${C_COMPILER_SCRIPT_FILE}")
+ execute_process(COMMAND ${CHMOD_PROGRAM} 755 ${C_COMPILER_SCRIPT})
+ set(CMAKE_C_COMPILER ${C_COMPILER_SCRIPT})
+ endif()
+endmacro()
+
+# finalize compiler script and write it
+macro(finalize_compiler_script)
+ if(${CMAKE_GENERATOR} MATCHES ".*Unix Makefiles.*")
+ # check CXX compiler
+ if((ALLOW_CXXFLAGS_OVERWRITE))
+ set(COMPILERS "CXX")
+ endif()
+ # check C compiler
+ if((ALLOW_CFLAGS_OVERWRITE))
+ set(COMPILERS ${COMPILERS} "C")
+ endif()
+
+ # for the found compilers for flag overloading generate compiler script
+ foreach(COMP ${COMPILERS})
+ set( COMPILER_SCRIPT_FILE "#!${BASH}\nSED=${SED_PROGRAM}\nGREP=${GREP_PROGRAM}")
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\nCUT=${CUT_PROGRAM}\nENV=${ENV_PROGRAM}\nECHO=${ECHO_PROGRAM}")
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\n# store flags\nFLAGS=\"\$@\"")
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\nMAKE_EXECUTABLE_NEW=0\n")
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\nif [ \"\$${COMP}FLAGS\" == \"\" ]; then\n # default ${COMP} flags\n ${COMP}FLAGS=\"${DEFAULT_CXXFLAGS}\"\nfi\n")
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\nif [ \"\$EXTRA_${COMP}FLAGS\" != \"\" ]; then\n # extra ${COMP} flags\n ${COMP}FLAGS=\"$${COMP}FLAGS $EXTRA_${COMP}FLAGS\"\nfi\n")
+ # only for CXX we need to scan config.h for GRIDTYPE
+ if( ${COMP} STREQUAL "CXX" )
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\nGRIDS=\nCONFIG_H=${CMAKE_BINARY_DIR}/config.h")
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\nif [ \"\$GRIDTYPE\" != \"\" ]; then")
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\n GRIDS=`\$GREP \"defined USED_[A-Z_]*_GRIDTYPE\" \$CONFIG_H | \$SED 's/\\(.*defined USED\\_\\)\\(.*\\)\\(\\_GRIDTYPE*\\)/\\2/g'`\nfi\n")
+ endif()
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\nOLDFLAGS=\$FLAGS\nFLAGS=")
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\nfor FLAG in \$OLDFLAGS; do")
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\n NEWFLAG=\$FLAG\n VARNAME=`\$ECHO \$FLAG | \$GREP \"\\-D\" | \$SED 's/-D//g'`")
+ # only for CXX we have GRIDTYPE
+ if( ${COMP} STREQUAL "CXX" )
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\n for GRID in \$GRIDS; do\n if [ \"\$VARNAME\" == \"\$GRID\" ]; then\n NEWFLAG=\"-D\$GRIDTYPE\"\n break\n fi\n done")
+ endif()
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\n VARNAME=`\$ECHO \$VARNAME | \$GREP \"=\" | \$CUT -d \"=\" -f 1`")
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\n if [ \"\$VARNAME\" != \"\" ]; then\n VAR=`\$ENV | \$GREP \$VARNAME`\n if [ \"\$VAR\" != \"\" ]; then")
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\n # add variable from environment to flags list\n NEWFLAG=\"-D\$VARNAME=\${!VARNAME}\"\n fi\n fi")
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\n FLAGS=\"\$FLAGS \$NEWFLAG\"\ndone")
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\n\$ECHO \"${DEFAULT_${COMP}_COMPILER} \$${COMP}FLAGS \$FLAGS\"")
+ set( COMPILER_SCRIPT_FILE "${COMPILER_SCRIPT_FILE}\nexec ${DEFAULT_${COMP}_COMPILER} \$${COMP}FLAGS \$FLAGS")
+ message("-- Generating ${COMP} compiler script for ${COMP}FLAGS overloading on command line")
+ if( ${COMP} STREQUAL "CXX" )
+ file(WRITE ${CXX_COMPILER_SCRIPT} "${COMPILER_SCRIPT_FILE}")
+ else()
+ file(WRITE ${C_COMPILER_SCRIPT} "${COMPILER_SCRIPT_FILE}")
+ endif()
+ endforeach()
+ endif()
+endmacro()
--- /dev/null
+# Module that provides conversion routines using inkscape
+#
+# .. cmake_function:: inkscape_generate_png_from_svg
+#
+# .. cmake_param:: OUTPUT_DIR
+# :single:
+#
+# The output directory for the generated png files.
+# Defaults to the current build directory.
+#
+# .. cmake_param:: pngfiles
+# :single:
+# :positional:
+# :required:
+#
+# The files that should be converted.
+#
+# .. cmake_param:: DPI
+# :single:
+#
+# dpi value for the generated image (default: 90)
+#
+# TODO Switch to named arguments!
+#
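+# A hypothetical call (the file name is illustrative only; the matching
+# flowchart.svg is expected in the current source directory):
+#
+# .. code-block:: cmake
+#
+#    inkscape_generate_png_from_svg(DPI 180 flowchart.png)
+#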
+include_guard(GLOBAL)
+
+include(CMakeParseArguments)
+
+function(inkscape_generate_png_from_svg)
+ if(NOT INKSCAPE)
+ return()
+ endif()
+ cmake_parse_arguments(INKSCAPE "" "OUTPUT_DIR;DPI" "" ${ARGN})
+ if(NOT INKSCAPE_OUTPUT_DIR)
+ set(INKSCAPE_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR})
+ endif()
+ if(NOT INKSCAPE_DPI)
+ set(INKSCAPE_DPI 90)
+ endif()
+
+ foreach(pic ${INKSCAPE_UNPARSED_ARGUMENTS})
+ string(REGEX REPLACE "\\.[a-zA-Z]+" ".svg" input ${pic})
+ if( INKSCAPE_NEW_VERSION )
+ execute_process(
+ COMMAND ${INKSCAPE} --export-dpi=${INKSCAPE_DPI} --export-type=png --export-filename=${pic} ${CMAKE_CURRENT_SOURCE_DIR}/${input}
+ WORKING_DIRECTORY ${INKSCAPE_OUTPUT_DIR})
+ else()
+ execute_process(
+ COMMAND ${INKSCAPE} -z --export-dpi=${INKSCAPE_DPI} -e ${pic} ${CMAKE_CURRENT_SOURCE_DIR}/${input}
+ WORKING_DIRECTORY ${INKSCAPE_OUTPUT_DIR})
+ endif()
+ endforeach()
+endfunction()
--- /dev/null
+# UseLatexMk.cmake is a CMake module to build Latex documents
+# from CMake.
+#
+# add_latex_document(SOURCE texsource
+# [TARGET target]
+# [EXCLUDE_FROM_ALL]
+# [REQUIRED]
+# [FATHER_TARGET father1 [father2 ...]]
+# [RCFILE rcfile1 [rcfile2 ...]]
+# [INSTALL destination]
+# [BUILD_ON_INSTALL]
+# )
+#
+# The arguments:
+# SOURCE
+# Required argument with a single tex source that defines the document to be built
+# TARGET
+# An optional target name, defaults to a suitable mangling of the given source and its path.
+# An additional target with _clean appended will be added as well, which cleans the output
+# and all auxiliary files.
+# EXCLUDE_FROM_ALL
+# Set this to prevent the target from being built by default. If the FATHER_TARGET
+# parameter is set, this option is set automatically.
+# REQUIRED
+# Set this option to issue a fatal error if the document could not
+# be built. By default it is only skipped.
+# FATHER_TARGET
+# A list of meta-targets that should trigger a rebuild of this target (like "make doc").
+# The targets are expected to exist already. Specifying any such targets will automatically add the
+# above EXCLUDE_FROM_ALL option.
+# RCFILE
+# A list of configuration files to customize the latexmk build process. These are read by latexmk
+# *after* the automatically generated rc file in the indicated order. Note that latexmk rcfiles
+# override any previous settings.
+# You may also use CMake variables within @'s (like @CMAKE_CURRENT_BINARY_DIR@) and have
+# them replaced with the matching CMake variables (see cmake's configure_file command).
+# Note that this is a powerful but advanced feature. For details on what can be achieved
+# see the latexmk manual. Note that triggering non-PDF builds through latexmkrc files might
+# cause problems with other features of UseLatexMk.
+# INSTALL
+# Set this option to an install directory to create an installation rule for this document.
+# BUILD_ON_INSTALL
+# Set this option if you want to trigger a build of this document during installation.
+#
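+# A minimal usage sketch (source file, father target and install path are only examples):
+#
+#   add_latex_document(SOURCE manual.tex
+#                      FATHER_TARGET doc
+#                      INSTALL ${CMAKE_INSTALL_DOCDIR})
+#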
+# Furthermore, UseLatexMk defines a CMake target clean_latex which cleans the build tree of
+# all PDF output and all auxiliary files. Note that (at least for the Unix Makefiles generator)
+# it is not possible to connect this process with the built-in clean target.
+#
+# Please note the following security restriction:
+#
+# UseLatexMk relies on latexmk separating input and output directory correctly.
+# This includes using an absolute path for the output directory. On some TeX
+# systems this requires the disabling of a security measure by setting `openout_any = a`.
+# From the latexmk documentation:
+#
+# Commonly, the directory specified for output files is a subdirectory of the current working
+# directory. However, if you specify some other directory, e.g., "/tmp/foo" or "../output", be aware
+# that this could cause problems, e.g., with makeindex or bibtex. This is because modern versions of
+# these programs, by default, will refuse to work when they find that they are asked to write to a
+# file in a directory that appears not to be the current working directory or one of its
+# subdirectories. This is part of security measures by the whole TeX system that try to prevent
+# malicious or errant TeX documents from incorrectly messing with a user's files. If for $out_dir or
+# $aux_dir you really do need to specify an absolute pathname (e.g., "/tmp/foo") or a path (e.g.,
+# "../output") that includes a higher-level directory, and you need to use makeindex or bibtex, then
+# you need to disable the security measures (and assume any risks). One way of doing this is to
+# temporarily set an operating system environment variable openout_any to "a" (as in "all"), to
+# override the default "paranoid" setting.
+#
+# UseLatexMk.cmake allows you to re-enable the TeX security measure by setting LATEXMK_PARANOID to TRUE
+# through cmake -D, but it is not guaranteed to work correctly in that case.
+#
+# For further information, visit https://github.com/dokempf/UseLatexMk
+#
+#
+# Copyright (c) 2017, Dominic Kempf, Steffen Müthing
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice, this
+# list of conditions and the following disclaimer in the documentation and/or
+# other materials provided with the distribution.
+#
+# * Neither the name of the Universität Heidelberg nor the names of its
+# contributors may be used to endorse or promote products derived from this
+# software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+include_guard(GLOBAL)
+
+# Find LATEX and LatexMk
+find_package(LATEX)
+find_package(LatexMk)
+
+# Find the latexmkrc template file shipped alongside UseLatexMk.cmake
+find_file(LATEXMKRC_TEMPLATE
+ latexmkrc.cmake
+ HINTS ${CMAKE_MODULE_PATH}
+ ${CMAKE_CURRENT_SOURCE_DIR}
+ ${PROJECT_SOURCE_DIR}
+ ${PROJECT_SOURCE_DIR}/cmake
+ ${PROJECT_SOURCE_DIR}/cmake/modules
+ NO_CMAKE_FIND_ROOT_PATH
+ )
+
+# Add the clean_latex target
+if(TARGET clean_latex)
+ message(WARNING "clean_latex target already exists. UseLatexMk attaches clean rules to it!")
+else()
+ add_custom_target(clean_latex)
+endif()
+
+set(LATEXMK_SOURCES_BUILD_FROM)
+
+function(add_latex_document)
+ # Parse the input parameters to the function
+ set(OPTION REQUIRED EXCLUDE_FROM_ALL BUILD_ON_INSTALL)
+ set(SINGLE SOURCE TARGET INSTALL)
+ set(MULTI FATHER_TARGET RCFILE)
+ include(CMakeParseArguments)
+ cmake_parse_arguments(LMK "${OPTION}" "${SINGLE}" "${MULTI}" ${ARGN})
+
+ if(LMK_UNPARSED_ARGUMENTS)
+ message("add_latex_document: Unparsed arguments! This often indicates typos in named arguments.")
+ endif()
+
+ # Apply default arguments and check for required arguments
+ if(NOT LMK_SOURCE)
+ message(FATAL_ERROR "No tex source specified for add_latex_document!")
+ endif()
+ if(NOT LMK_TARGET)
+ # Construct a nice target name from the source file
+ get_filename_component(LMK_TARGET ${LMK_SOURCE} ABSOLUTE)
+ file(RELATIVE_PATH LMK_TARGET ${PROJECT_SOURCE_DIR} ${LMK_TARGET})
+ string(REPLACE "/" "_" LMK_TARGET ${LMK_TARGET})
+ string(REPLACE "." "_" LMK_TARGET ${LMK_TARGET})
+ endif()
+ if(LMK_FATHER_TARGET)
+ set(LMK_EXCLUDE_FROM_ALL TRUE)
+ endif()
+ if(LMK_BUILD_ON_INSTALL AND (NOT LMK_INSTALL))
+ message(WARNING "Specified to build on installation, but not installing!")
+ endif()
+
+ # Verify that each source is used exactly once
+ set(ABS_SOURCE ${LMK_SOURCE})
+ if(NOT IS_ABSOLUTE ${ABS_SOURCE})
+ get_filename_component(ABS_SOURCE ${ABS_SOURCE} ABSOLUTE)
+ endif()
+ list(FIND LATEXMK_SOURCES_BUILD_FROM ${ABS_SOURCE} ALREADY_BUILT)
+ if(NOT "${ALREADY_BUILT}" STREQUAL "-1")
+ message(FATAL_ERROR "UseLatexMk: You are building twice from the same source, which is unsupported!")
+ endif()
+ set(LATEXMK_SOURCES_BUILD_FROM ${LATEXMK_SOURCES_BUILD_FROM} ${ABS_SOURCE} PARENT_SCOPE)
+
+ # Check the existence of the latexmk executable and skip/fail if not present
+ if(NOT (LATEXMK_FOUND AND PDFLATEX_COMPILER))
+ if(LMK_REQUIRED)
+ message(FATAL_ERROR "Some Latex documents were required by the project, but LATEX or LatexMk were not found!")
+ else()
+ return()
+ endif()
+ endif()
+
+ # Determine the output name
+ get_filename_component(output ${LMK_SOURCE} NAME_WE)
+ set(OUTPUT_PDF ${CMAKE_CURRENT_BINARY_DIR}/${output}.pdf)
+
+ # Inspect the EXCLUDE_FROM_ALL option
+ if(LMK_EXCLUDE_FROM_ALL)
+ set(ALL_OPTION "")
+ else()
+ set(ALL_OPTION "ALL")
+ endif()
+
+ # Generate a latexmkrc file for this project
+ if(NOT LATEXMKRC_TEMPLATE)
+ message(FATAL_ERROR "The latexmkrc template file could not be found. Consider adding its path to CMAKE_MODULE_PATH")
+ endif()
+ set(LATEXMKRC_FILE "${CMAKE_CURRENT_BINARY_DIR}/${LMK_TARGET}.latexmkrc")
+ configure_file(${LATEXMKRC_TEMPLATE} ${LATEXMKRC_FILE} @ONLY)
+ set(LATEXMKRC_OPTIONS -r ${LATEXMKRC_FILE})
+
+ # Process additional latexmkrc files
+ foreach(rcfile ${LMK_RCFILE})
+ get_filename_component(rcfile_base ${rcfile} NAME)
+ set(LATEXMKRC_FILE "${CMAKE_CURRENT_BINARY_DIR}/${LMK_TARGET}_${rcfile_base}")
+ configure_file(${rcfile} ${LATEXMKRC_FILE} @ONLY)
+ set(LATEXMKRC_OPTIONS ${LATEXMKRC_OPTIONS} -r ${LATEXMKRC_FILE})
+ endforeach()
+
+ # Add the BYPRODUCTS parameter, if the CMake version supports it
+ set(BYPRODUCTS_PARAMETER "")
+ if (CMAKE_VERSION VERSION_GREATER "3.2")
+ set(BYPRODUCTS_PARAMETER BYPRODUCTS ${OUTPUT_PDF})
+ endif()
+
+ # Maybe allow latexmk the use of absolute paths
+ if(NOT LATEXMK_PARANOID)
+ set(ENV{openout_any} "a")
+ endif()
+
+ # Call the latexmk executable
+ # NB: Using add_custom_target here results in the target always being out-of-date.
+ # This offloads the dependency tracking from cmake to latexmk. This is an
+ # intentional decision of UseLatexMk to avoid listing dependencies of the tex source.
+ add_custom_target(${LMK_TARGET}
+ ${ALL_OPTION}
+ COMMAND ${LATEXMK_EXECUTABLE} ${LATEXMKRC_OPTIONS} ${LMK_SOURCE}
+ WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
+ COMMENT "Building PDF from ${LMK_SOURCE}..."
+ ${BYPRODUCTS_PARAMETER}
+ )
+
+ # Add dependencies to father targets
+ foreach(father ${LMK_FATHER_TARGET})
+ if(NOT TARGET ${father})
+ message(FATAL_ERROR "The target given to add_latex_document's FATHER_TARGET parameter does not exist")
+ endif()
+ add_dependencies(${father} ${LMK_TARGET})
+ endforeach()
+
+ # Add installation rules
+ if(LMK_BUILD_ON_INSTALL)
+ install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} --build . --target ${LMK_TARGET} --config $<CONFIGURATION>)")
+ endif()
+ if(LMK_INSTALL)
+ install(FILES ${OUTPUT_PDF}
+ DESTINATION ${LMK_INSTALL}
+ OPTIONAL)
+ endif()
+
+ # Add a clean up rule to the clean_latex target
+ add_custom_target(${LMK_TARGET}_clean
+ COMMAND ${LATEXMK_EXECUTABLE} -C ${LATEXMKRC_OPTIONS} ${LMK_SOURCE}
+ WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
+ COMMENT "Cleaning build results from target ${LMK_TARGET}"
+ )
+ add_dependencies(clean_latex ${LMK_TARGET}_clean)
+endfunction()
--- /dev/null
+# .latexmkrc generated by CMake from UseLatexMk.cmake starts here
+
+$bibtex = "@BIBTEX_COMPILER@ %O %S";
+$dvips = "@DVIPS_CONVERTER@ %O -o %D %S";
+$latex = "@LATEX_COMPILER@ %O %S";
+$make = "@CMAKE_MAKE_COMMAND@";
+$makeindex = "@MAKEINDEX_COMPILER@ %O -o %D %S";
+$out_dir = "@CMAKE_CURRENT_BINARY_DIR@";
+$pdf_mode = 1;
+$pdflatex = "@PDFLATEX_COMPILER@ -shell-escape -interaction=nonstopmode %O %S";
+$ps2pdf = "@PS2PDF_CONVERTER@ %O %S %D";
+
+# .latexmkrc generated by CMake from UseLatexMk.cmake ends here
--- /dev/null
+if(NOT @DUNE_MOD_NAME@_FOUND)
+@PACKAGE_INIT@
+
+#import the target
+get_filename_component(_dir "${CMAKE_CURRENT_LIST_FILE}" PATH)
+include("${_dir}/@DUNE_MOD_NAME@-targets.cmake")
+
+#report other information
+set_and_check(@DUNE_MOD_NAME@_PREFIX "${PACKAGE_PREFIX_DIR}")
+set_and_check(@DUNE_MOD_NAME@_INCLUDE_DIRS "@PACKAGE_CMAKE_INSTALL_INCLUDEDIR@")
+set(@DUNE_MOD_NAME@_CXX_FLAGS "@CMAKE_CXX_FLAGS@")
+set(@DUNE_MOD_NAME@_CXX_FLAGS_DEBUG "@CMAKE_CXX_FLAGS_DEBUG@")
+set(@DUNE_MOD_NAME@_CXX_FLAGS_MINSIZEREL "@CMAKE_CXX_FLAGS_MINSIZEREL@")
+set(@DUNE_MOD_NAME@_CXX_FLAGS_RELEASE "@CMAKE_CXX_FLAGS_RELEASE@")
+set(@DUNE_MOD_NAME@_CXX_FLAGS_RELWITHDEBINFO "@CMAKE_CXX_FLAGS_RELWITHDEBINFO@")
+set(@DUNE_MOD_NAME@_LIBRARIES "dunecommon")
+set_and_check(@DUNE_MOD_NAME@_SCRIPT_DIR "@PACKAGE_SCRIPT_DIR@")
+set_and_check(DOXYSTYLE_FILE "@PACKAGE_DOXYSTYLE_DIR@/Doxystyle")
+set_and_check(DOXYGENMACROS_FILE "@PACKAGE_DOXYSTYLE_DIR@/doxygen-macros")
+set(@DUNE_MOD_NAME@_DEPENDS "@DUNE_DEPENDS@")
+set(@DUNE_MOD_NAME@_SUGGESTS "@DUNE_SUGGESTS@")
+set_and_check(@DUNE_MOD_NAME@_MODULE_PATH "@PACKAGE_DUNE_INSTALL_MODULEDIR@")
+endif(NOT @DUNE_MOD_NAME@_FOUND)
--- /dev/null
+# Install non-executable scripts
+install(FILES
+ conf.py.in
+ CreateDoxyFile.cmake
+ envdetect.py
+ FinalizeHeadercheck.cmake
+ index.rst.in
+ InstallFile.cmake
+ main77.cc.in
+ module_library.cc.in
+ pyversion.py
+ RunDoxygen.cmake
+ sphinx_cmake_dune.py
+ DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/dune/cmake/scripts)
+
+# Install executable programs
+install(PROGRAMS
+ extract_cmake_data.py
+ run-in-dune-env.sh.in
+ DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/dune/cmake/scripts
+)
--- /dev/null
+# For now we just support appending Doxyfile and Doxylocal
+file(READ ${DOXYSTYLE} file_contents)
+file(WRITE Doxyfile.in ${file_contents})
+# Write the list of predefined C preprocessor macros
+file(READ ${DOXYGENMACROS} file_contents)
+file(APPEND Doxyfile.in ${file_contents})
+if(DOXYLOCAL)
+ file(READ ${DOXYLOCAL} file_contents)
+endif()
+file(APPEND Doxyfile.in ${file_contents})
+
+# configure_file does not work here, as it insists on an existing input file, which in our
+# case needs to be generated first.
+# Therefore we read the Doxyfile.in and replace the variables using string(CONFIGURE)
+# and then write the file.
+file(READ Doxyfile.in file_contents)
+string(CONFIGURE ${file_contents} output)
+file(WRITE Doxyfile ${output})
--- /dev/null
+# This script is called at the end of all header checks
+if(ENABLE_HEADERCHECK)
+ message("Headerchecks finished! Rerun CMake if a new file has not been checked!")
+else()
+ message("The headercheck feature is currently disabled. You can enable it by adding ENABLE_HEADERCHECK=1 to your cmake flags.")
+endif()
+
+#message("Running make clean on headercheck targets...")
+#this cleans the build directory from pollution through headerchecks but prevents caching... :/
+#file(GLOB_RECURSE list "./CMakeFiles/headercheck_*/cmake_clean.cmake")
+#foreach(item ${list})
+ # execute_process(COMMAND ${CMAKE_COMMAND} -P ${item})
+#endforeach()
\ No newline at end of file
--- /dev/null
+
+# Somehow variable lists get destroyed when calling cmake (';' is replaced with a
+# whitespace character). Undo this change.
+string(REGEX REPLACE "([a-zA-Z0-9]) ([/a-zA-Z0-9])" "\\1;\\2" files "${FILES}")
+file(INSTALL ${files} DESTINATION ${DIR})
+
--- /dev/null
+execute_process(COMMAND
+ ${DOXYGEN_EXECUTABLE} Doxyfile OUTPUT_FILE doxygen.log ERROR_FILE doxygen.log
+ TIMEOUT 3600)
--- /dev/null
+import sys
+
+sys.path.append('@DUNE_SPHINX_EXT_PATH@')
+
+extensions = ['sphinx_cmake_dune']
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = "classic"
+html_theme_options = {
+ "rightsidebar": "true",
+ "relbarbgcolor": "#eeeeee",
+ "relbartextcolor": "#353B44",
+ "relbarlinkcolor": "#353B44",
+ "headbgcolor": "white",
+ "headtextcolor": "#353B44",
+ "linkcolor": "#337AB7",
+ "visitedlinkcolor": "#337AB7",
+ "textcolor": "#353B44",
+ "footerbgcolor": "white",
+ "footertextcolor": "#353B44",
+ "codebgcolor": "#eeeeee",
+}
+
+html_sidebars = {'**': []}
+html_title = ""
--- /dev/null
+# A python script that determines whether the current interpreter is
+# running inside a virtual environment. For discussion of the implemented
+# methods, see http://stackoverflow.com/questions/1871549
+#
+# Meant to be run from DunePythonCommonMacros.cmake. For that reason, it
+# exits with either 1 or 0, where 1 indicates that the interpreter
+# runs inside a virtualenv
+#
+
+import sys
+
+# If sys.real_prefix exists, this is a virtualenv set up with the virtualenv package
+real_prefix = hasattr(sys, 'real_prefix')
+if real_prefix:
+ sys.exit(1)
+
+# If a virtualenv is set up with pyvenv, we check for equality of base_prefix and prefix
+if hasattr(sys, 'base_prefix'):
+ sys.exit(sys.prefix != sys.base_prefix)
+
+# If none of the above conditions triggered, this is probably no virtualenv interpreter
+sys.exit(0)
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env python
+
+""" This script will parse a cmake module and extract some
+ rst documentation from it. This might not be as elegant as
+ writing a Sphinx domain or using a custom extension with
+ cmake related directives, but it provides a straightforward
+ working way.
+
+ This is used by dune-common to generate the build system documentation.
+ Users do not want to use this!!!
+"""
+from __future__ import print_function
+
+import argparse
+import errno
+import os
+import re
+
+def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-b', '--builddir', help='The directory where to place the produced output', required=True)
+ parser.add_argument('-m', '--module', help='The module to parse', required=True)
+ return vars(parser.parse_args())
+
+def write_line(f, line):
+ if len(line) > 2:
+ f.write(line[2:])
+ else:
+ f.write('\n')
+
+def makedirs_if_not_exists(path):
+ # Python3's os.makedirs has exist_ok=True, but this is still Python2...
+ try:
+ os.makedirs(path)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+def read_module(args=get_args()):
+ modname = os.path.splitext(os.path.basename(args['module']))[0]
+ modpath = os.path.join(args['builddir'], 'modules')
+ makedirs_if_not_exists(modpath)
+ modfile = os.path.join(modpath, modname + '.rst')
+ with open(args['module'], 'r') as i:
+# mod = open(modfile, 'w')
+# # Write the first block into the module rst file
+# mod.write(".. _" + modname + ":\n\n")
+# mod.write(modname + "\n")
+# mod.write("="*len(modname) + "\n\n")
+
+# listHeader = False
+ o = None
+
+ for l in i:
+ if not l.startswith('#'):
+ return
+ if l.startswith('# .. cmake_function'):
+ if o:
+ o.close()
+ cmdpath = os.path.join(args['builddir'], 'commands')
+ makedirs_if_not_exists(cmdpath)
+ try:
+ cmd = re.findall(r'# .. cmake_function:: (.*)', l)[0]
+ except IndexError as e:
+ print("CMake doc syntax error in {}: cannot parse function on line {}".format(args['module'], l))
+ raise e
+ cmdfile = os.path.join(cmdpath, cmd + ".rst")
+# if not listHeader:
+# mod.write("\nThis module defines the following functions or macros:\n\n")
+# listHeader = True
+# mod.write("* :ref:`{}`\n".format(cmd))
+ o = open(cmdfile, 'w')
+ o.write(".. _" + cmd + ":\n\n")
+ o.write(cmd + "\n")
+ o.write("="*len(cmd) + "\n\n")
+ write_line(o, l)
+ elif l.startswith('# .. cmake_variable'):
+ if o:
+ o.close()
+ varpath = os.path.join(args['builddir'], 'variables')
+ makedirs_if_not_exists(varpath)
+ try:
+ var = re.findall(r'# .. cmake_variable:: (.*)', l)[0]
+ except IndexError as e:
+ print("CMake doc syntax error in {}: cannot parse variable on line {}".format(args['module'], l))
+ raise e
+ varfile = os.path.join(varpath, var + ".rst")
+ o = open(varfile, 'w')
+ o.write(".. _" + var + ":\n\n")
+ o.write(var + "\n")
+ o.write("="*len(var) + "\n\n")
+ write_line(o, l)
+ elif l.startswith('# .. cmake_module'):
+ if o:
+ o.close()
+ modpath = os.path.join(args['builddir'], 'modules')
+ makedirs_if_not_exists(modpath)
+ modfile = os.path.join(modpath, modname + ".rst")
+ o = open(modfile, 'w')
+ o.write(".. _" + modname + ":\n\n")
+ o.write(modname + "\n")
+ o.write("="*len(modname) + "\n\n")
+ write_line(o, l)
+ else:
+ if o:
+ write_line(o, l)
+
+# Parse the given arguments
+read_module()
--- /dev/null
+.. title:: @PROJECT_NAME@ CMake reference
+
+.. role:: cmake(code)
+ :language: cmake
+
+Introduction
+============
+.. toctree::
+ :maxdepth: 2
+
+@CMAKE_DOC_DEPENDENCIES@
+
+.. _variableref:
+
+Input Variable reference
+========================
+.. toctree::
+ :maxdepth: 1
+ :glob:
+
+ variables/*
+
+.. _commandref:
+
+Command reference
+=================
+.. toctree::
+ :maxdepth: 1
+ :glob:
+
+ commands/*
+
+.. _moduleref:
+
+Module reference
+================
+.. toctree::
+ :maxdepth: 1
+ :glob:
+
+ modules/*
--- /dev/null
+#include<iostream>
+
+int main()
+{
+ std::cout << "This test was skipped because it failed the following CMake Conditions:" << std::endl;
+ ${FAILED_CONDITION_PRINTING}
+ return 77;
+}
--- /dev/null
+#include <string>
+
+std::size_t ${module_lib_mangled}_version()
+{
+ return ${ProjectVersionMajor} * 10000 + ${ProjectVersionMinor} * 100 + ${ProjectVersionRevision};
+}
+
+std::string ${module_lib_mangled}_version_string()
+{
+ return "${ProjectVersion}";
+}
--- /dev/null
+# This python script tries to figure out the version of a given python
+# package. This is only intended to be used from DunePythonFindPackage.cmake
+#
+# There is no unified way of specifying the version of a python package. This
+# script implements some methods. For discussion on the implemented methods see
+# http://stackoverflow.com/questions/20180543
+#
+
+import sys
+
+# Load the module passed as argument (this avoids the need for a template
+# to be configured to put the package name in here)
+modstr = sys.argv[1]
+module = __import__(modstr)
+
+# The most common mechanism is module.__version__
+if hasattr(module, '__version__'):
+ sys.stdout.write(module.__version__)
+ sys.exit(0)
+
+# Alternative implementation: through pip (pip itself implements pip.__version__,
+# so we never get here when checking the version of pip itself); this only works if
+# the package name and the distribution name are the same
+import pkg_resources
+for package in pkg_resources.working_set:
+ if package.project_name == modstr and package.has_version():
+ sys.stdout.write(package.version)
+ sys.exit(0)
+
+# Give up on this one
+sys.exit(1)
--- /dev/null
+#!@BASH@
+
+source @DUNE_PYTHON_VIRTUALENV_PATH@/bin/activate
+"$@"
--- /dev/null
+""" A cmake extension for Sphinx
+
+tailored for the Dune project.
+This is used during `make doc` to build the
+build system documentation.
+"""
+
+from docutils import nodes
+from docutils.parsers.rst import Directive
+from itertools import chain
+
+class CMakeParamNode(nodes.Element):
+ pass
+
+class CMakeBriefNode(nodes.Element):
+ pass
+
+class CMakeFunction(Directive):
+ # We do require the name to be an argument
+ required_arguments = 1
+ optional_arguments = 0
+ final_argument_whitespace = False
+ has_content = True
+
+ def run(self):
+ env = self.state.document.settings.env
+
+ # Parse the content of the directive recursively
+ node = nodes.Element()
+ node.document = self.state.document
+ self.state.nested_parse(self.content, self.content_offset, node)
+
+ brief_nodes = []
+ output_nodes = []
+ positional_params = []
+ required_params = {}
+ optional_params = {}
+
+ for child in node:
+ if isinstance(child, CMakeParamNode):
+ if child["positional"]:
+ positional_params.append(child)
+ elif child["required"]:
+ required_params[child["name"]] = child
+ else:
+ optional_params[child["name"]] = child
+ elif isinstance(child, CMakeBriefNode):
+ par = nodes.paragraph()
+ self.state.nested_parse(child['content'], self.content_offset, par)
+ brief_nodes.append(par)
+ else:
+ output_nodes.append(child)
+
+ def render_required(paramnode):
+ if paramnode["multi"]:
+ sl.append(" "*5 + paramnode['name'] + ' ' + paramnode['argname'] + '1 [' + paramnode['argname'] + '2 ...]\n')
+ if paramnode["single"]:
+ sl.append(" "*5 + paramnode['name'] + ' ' + paramnode['argname'] + '\n')
+ if paramnode["option"]:
+ sl.append(" "*5 + paramnode['name'] + '\n')
+ if paramnode["special"]:
+ sl.append(" "*5 + paramnode['argname'] + '\n')
+
+ def render_optional(paramnode):
+ if paramnode["multi"]:
+ sl.append(' '*4 + '[' + paramnode['name'] + ' ' + paramnode['argname'] + '1 [' + paramnode['argname'] + '2 ...]' + ']\n')
+ if paramnode["single"]:
+ sl.append(" "*4 + '['+ paramnode['name'] + ' ' + paramnode['argname'] + ']\n')
+ if paramnode["option"]:
+ sl.append(" "*4 + '['+ paramnode['name'] + ']\n')
+ if paramnode["special"]:
+ sl.append(" "*4 + '['+ paramnode['argname'] + ']\n')
+
+ # Build the content of the box
+ sl = [self.arguments[0] + '(\n']
+
+ for paramnode in positional_params:
+ if paramnode["required"]:
+ render_required(paramnode)
+ else:
+ render_optional(paramnode)
+
+ for rp, paramnode in required_params.items():
+ render_required(paramnode)
+ for op, paramnode in optional_params.items():
+ render_optional(paramnode)
+
+ sl.append(")\n")
+ lb = nodes.literal_block(''.join(sl), ''.join(sl))
+ brief_nodes.append(lb)
+
+ dl = nodes.definition_list()
+ for paramnode in chain(positional_params, required_params.values(), optional_params.values()):
+ dli = nodes.definition_list_item()
+ dl += dli
+
+ dlit = nodes.term(text=paramnode["name"])
+ dli += dlit
+
+ dlic = nodes.definition()
+ dli += dlic
+ self.state.nested_parse(paramnode['content'], self.content_offset, dlic)
+
+ # add the parameter list to the output
+ brief_nodes.append(dl)
+
+ return brief_nodes + output_nodes
+
+class CMakeBrief(Directive):
+ required_arguments = 0
+ optional_arguments = 0
+ final_argument_whitespace = False
+ has_content = True
+
+ def run(self):
+ node = CMakeBriefNode()
+ node['content'] = self.content
+ return [node]
+
+class CMakeParam(Directive):
+ # We do require the name to be an argument
+ required_arguments = 1
+ optional_arguments = 0
+ final_argument_whitespace = False
+ option_spec = {'argname' : lambda s: s,
+ 'multi': lambda s: True,
+ 'option': lambda s: True,
+ 'positional' : lambda s: True,
+ 'required': lambda s: True,
+ 'single': lambda s: True,
+ 'special': lambda s: True
+ }
+ has_content = True
+
+ def run(self):
+ node = CMakeParamNode()
+ # set defaults:
+ node['name'] = self.arguments[0]
+ node['single'] = self.options.get('single', False)
+ node['multi'] = self.options.get('multi', False)
+ node['option'] = self.options.get('option', False)
+ node['special'] = self.options.get('special', False)
+ node['positional'] = self.options.get('positional', False)
+ node['required'] = self.options.get('required', False)
+ node['argname'] = self.options.get('argname', self.arguments[0].lower() if self.arguments[0].lower()[-1:] != 's' else self.arguments[0].lower()[:-1])
+ node['content'] = self.content
+ if node['positional']:
+ node['argname'] = ''
+ return [node]
+
+
+class CMakeVariable(Directive):
+ # We do require the name to be an argument
+ required_arguments = 1
+ optional_arguments = 0
+ final_argument_whitespace = False
+ option_spec = {'argname' : lambda s: s,
+ 'multi': lambda s: True,
+ 'option': lambda s: True,
+ 'positional' : lambda s: True,
+ 'required': lambda s: True,
+ 'single': lambda s: True
+ }
+ has_content = True
+
+ def run(self):
+ node = nodes.paragraph()
+ self.state.nested_parse(self.content, self.content_offset, node)
+ return [node]
+
+class CMakeModule(Directive):
+ required_arguments = 0
+ optional_arguments = 0
+ final_argument_whitespace = False
+ has_content = True
+
+ def run(self):
+ node = nodes.paragraph()
+ self.state.nested_parse(self.content, self.content_offset, node)
+ return [node]
+
+def setup(app):
+ app.add_node(CMakeBriefNode)
+ app.add_node(CMakeParamNode)
+ app.add_directive('cmake_module', CMakeModule)
+ app.add_directive('cmake_brief', CMakeBrief)
+ app.add_directive('cmake_function', CMakeFunction)
+ app.add_directive('cmake_param', CMakeParam)
+ app.add_directive('cmake_variable', CMakeVariable)
+
+ return {'version': '0.1'}
--- /dev/null
+/* begin dune-common
+ put the definitions for config.h specific to
+ your project here. Everything above will be
+ overwritten
+*/
+
+/* begin private */
+/* Define to the version of dune-common */
+#define DUNE_COMMON_VERSION "${DUNE_COMMON_VERSION}"
+
+/* Define to the major version of dune-common */
+#define DUNE_COMMON_VERSION_MAJOR ${DUNE_COMMON_VERSION_MAJOR}
+
+/* Define to the minor version of dune-common */
+#define DUNE_COMMON_VERSION_MINOR ${DUNE_COMMON_VERSION_MINOR}
+
+/* Define to the revision of dune-common */
+#define DUNE_COMMON_VERSION_REVISION ${DUNE_COMMON_VERSION_REVISION}
+
+/* Standard debug streams with a level below will collapse to doing nothing */
+#define DUNE_MINIMAL_DEBUG_LEVEL ${DUNE_MINIMAL_DEBUG_LEVEL}
+
+/* does the compiler support __attribute__((deprecated))? */
+#cmakedefine HAS_ATTRIBUTE_DEPRECATED 1
+
+/* does the compiler support __attribute__((deprecated("message")))? */
+#cmakedefine HAS_ATTRIBUTE_DEPRECATED_MSG 1
+
+/* does the compiler support __attribute__((unused))? */
+#cmakedefine HAS_ATTRIBUTE_UNUSED 1
+
+/* does the standard library provide experimental::make_array() ? */
+#cmakedefine DUNE_HAVE_CXX_EXPERIMENTAL_MAKE_ARRAY 1
+
+/* does the standard library provide experimental::is_detected ? */
+#cmakedefine DUNE_HAVE_CXX_EXPERIMENTAL_IS_DETECTED 1
+
+/* does the standard library provide identity ? */
+#cmakedefine DUNE_HAVE_CXX_STD_IDENTITY 1
+
+/* Define if you have a BLAS library. */
+#cmakedefine HAVE_BLAS 1
+
+/* Define if you have LAPACK library. */
+#cmakedefine HAVE_LAPACK 1
+
+/* Define if you have the MPI library. */
+#cmakedefine HAVE_MPI ENABLE_MPI
+
+/* Deactivate cxx bindings for MPI */
+#if defined(HAVE_MPI) && HAVE_MPI
+#define MPICH_SKIP_MPICXX 1
+#define OMPI_SKIP_MPICXX 1
+#define MPI_NO_CPPBIND 1
+#define MPIPP_H
+#define _MPICC_H
+#endif
+
+/* Define if you have the GNU GMP library. The value should be ENABLE_GMP
+ to facilitate activating and deactivating GMP using compile flags. */
+#cmakedefine HAVE_GMP ENABLE_GMP
+
+/* Define if you have the GCC Quad-Precision library. The value should be ENABLE_QUADMATH
+ to facilitate activating and deactivating QuadMath using compile flags. */
+#cmakedefine HAVE_QUADMATH ENABLE_QUADMATH
+
+/* Define if you have the Vc library. The value should be ENABLE_VC
+ to facilitate activating and deactivating Vc using compile flags. */
+#cmakedefine HAVE_VC ENABLE_VC
+
+/* Define to 1 if you have the Threading Building Blocks (TBB) library */
+#cmakedefine HAVE_TBB 1
+
+/* begin private */
+
+/* Name of package */
+#define PACKAGE "dune-common"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "@DUNE_MAINTAINER@"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "@DUNE_MOD_NAME@"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "@DUNE_MOD_NAME@ @DUNE_MOD_VERSION@"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "@DUNE_MOD_NAME@"
+
+/* Define to the home page for this package. */
+#define PACKAGE_URL "@DUNE_MOD_URL@"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "@DUNE_MOD_VERSION@"
+
+/* Version number of package */
+#define VERSION "@DUNE_MOD_VERSION@"
+
+/* end private */
+
+
+/* old feature support macros which were tested until 2.7, kept around for one more release */
+/* As these are now always supported due to the new compiler requirements, they are directly */
+/* defined without an explicit test. */
+#define DUNE_HAVE_CXX_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1
+#define DUNE_HAVE_CXX_OPTIONAL 1
+#define DUNE_HAVE_CXX_VARIANT 1
+#define DUNE_SUPPORTS_CXX_THROW_IN_CONSTEXPR 1
+#define DUNE_HAVE_C_ALIGNED_ALLOC 1
+#define DUNE_HAVE_CXX_BOOL_CONSTANT 1
+#define DUNE_HAVE_CXX_EXPERIMENTAL_BOOL_CONSTANT 0
+#define DUNE_HAVE_HEADER_EXPERIMENTAL_TYPE_TRAITS 0
+#define DUNE_HAVE_CXX_APPLY 1
+#define DUNE_HAVE_CXX_EXPERIMENTAL_APPLY 0
+#define HAVE_IS_INDEXABLE_SUPPORT 1
+
+/* Define to ENABLE_UMFPACK if the UMFPack library is available */
+#cmakedefine HAVE_UMFPACK ENABLE_SUITESPARSE
+
+/* Define to ENABLE_SUITESPARSE if the SuiteSparse library is available */
+#cmakedefine HAVE_SUITESPARSE ENABLE_SUITESPARSE
+
+/* Define to ENABLE_SUITESPARSE if the SuiteSparse's AMD library is available */
+#cmakedefine HAVE_SUITESPARSE_AMD ENABLE_SUITESPARSE
+
+/* Define to ENABLE_SUITESPARSE if the SuiteSparse's BTF library is available */
+#cmakedefine HAVE_SUITESPARSE_BTF ENABLE_SUITESPARSE
+
+/* Define to ENABLE_SUITESPARSE if the SuiteSparse's CAMD library is available */
+#cmakedefine HAVE_SUITESPARSE_CAMD ENABLE_SUITESPARSE
+
+/* Define to ENABLE_SUITESPARSE if the SuiteSparse's CCOLAMD library is available */
+#cmakedefine HAVE_SUITESPARSE_CCOLAMD ENABLE_SUITESPARSE
+
+/* Define to ENABLE_SUITESPARSE if the SuiteSparse's CHOLMOD library is available */
+#cmakedefine HAVE_SUITESPARSE_CHOLMOD ENABLE_SUITESPARSE
+
+/* Define to ENABLE_SUITESPARSE if the SuiteSparse's COLAMD library is available */
+#cmakedefine HAVE_SUITESPARSE_COLAMD ENABLE_SUITESPARSE
+
+/* Define to ENABLE_SUITESPARSE if the SuiteSparse's CXSPARSE library is available */
+#cmakedefine HAVE_SUITESPARSE_CXSPARSE ENABLE_SUITESPARSE
+
+/* Define to ENABLE_SUITESPARSE if the SuiteSparse's KLU library is available */
+#cmakedefine HAVE_SUITESPARSE_KLU ENABLE_SUITESPARSE
+
+/* Define to ENABLE_SUITESPARSE if the SuiteSparse's LDL library is available */
+#cmakedefine HAVE_SUITESPARSE_LDL ENABLE_SUITESPARSE
+
+/* Define to ENABLE_SUITESPARSE if the SuiteSparse's RBIO library is available */
+#cmakedefine HAVE_SUITESPARSE_RBIO ENABLE_SUITESPARSE
+
+/* Define to ENABLE_SUITESPARSE if the SuiteSparse's SPQR library is available
+ and if its version is at least 4.3 */
+#cmakedefine HAVE_SUITESPARSE_SPQR ENABLE_SUITESPARSE
+
+/* Define to ENABLE_SUITESPARSE if the SuiteSparse's UMFPACK library is available */
+#cmakedefine HAVE_SUITESPARSE_UMFPACK ENABLE_SUITESPARSE
+
+/* Define to 1 if METIS is available */
+#cmakedefine HAVE_METIS 1
+
+/* Define to 1 if the Scotch replacement for METIS is used. */
+#cmakedefine HAVE_SCOTCH_METIS 1
+
+/* Define to 1 if you have the ParMETIS library. */
+#cmakedefine HAVE_PARMETIS 1
+
+/* Define to 1 if the PTScotch replacement for ParMETIS is used. */
+#cmakedefine HAVE_PTSCOTCH_PARMETIS 1
+
+/* Define to 1 if PT-Scotch is available */
+#cmakedefine HAVE_PTSCOTCH 1
+
+/* Used to call lapack functions */
+#cmakedefine LAPACK_NEEDS_UNDERLINE
+
+/* end dune-common
+ Everything below here will be overwritten
+*/
--- /dev/null
+add_subdirectory("doxygen")
+add_subdirectory("buildsystem")
+add_subdirectory("comm")
+install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/dunecontrol.1
+ DESTINATION ${CMAKE_INSTALL_MANDIR}/man1)
--- /dev/null
+# Install the buildsystem documentation defined in dune-common
+install(FILES dune-common.rst DESTINATION ${CMAKE_INSTALL_DOCDIR})
+
+# Also always build the CMake API documentation in dune-common
+dune_cmake_sphinx_doc()
--- /dev/null
+===========
+dune-common
+===========
+
+.. _whatis:
+
+What is CMake anyway?
+=====================
+
+CMake...
+
+- is an open source build system tool developed at Kitware.
+- offers a one-tool solution to all building tasks, such as configuring, building, linking, testing and packaging.
+- is a build system generator: it supports a set of backends called *generators*.
+- is portable.
+- is controlled by ONE rather simple language.
+
+You can install CMake through your favorite package manager or by downloading the source code from
+`Kitware <http://www.cmake.org>`_.
+The minimum required version to build Dune with CMake is 3.13.
+
+.. _howtouse:
+
+How do I use Dune with CMake?
+=============================
+
+The build process is controlled by the script :code:`dunecontrol`, located in :code:`dune-common/bin`.
+There is a compatibility layer that will translate all the configure flags from your opts file into the corresponding
+CMake flags. While this is a great tool to determine how to do the transition, in the long run you should switch to
+a CMake-only approach.
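+
+A typical invocation might look like this (the opts file name is an example):
+
+.. code-block:: bash
+
+    ./dune-common/bin/dunecontrol --opts=release.opts all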
+
+:code:`dunecontrol` will pick up the variable :code:`CMAKE_FLAGS` from your opts file and use it as command line options for
+any call to CMake. There, you can define variables for the configure process with CMake's :code:`-D` option, just as
+with the C preprocessor.
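+
+For illustration, a minimal opts file might look like this sketch (build directory name and flags are only examples):
+
+.. code-block:: bash
+
+    BUILDDIR=build-release
+    CMAKE_FLAGS="-DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=g++"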
+
+The most important part of the configure flags is to tell the build system where to look for external libraries.
+You can browse the :ref:`variableref` section of this documentation for a list of variables that are picked up
+by the Dune CMake build system.
+
+.. _whatfiles:
+
+What files in a dune module belong to the CMake build system?
+=============================================================
+
+Every directory in a project contains a file called :code:`CMakeLists.txt`, which is written in the CMake language.
+You can think of these as a distributed configure script. Upon configure, the top-level :code:`CMakeLists.txt` is executed.
+Whenever an :ref:`add_subdirectory` command is encountered, the :code:`CMakeLists.txt` file of that sub-directory is executed.
+The top-level :code:`CMakeLists.txt` file is special, because it sets up the entire Dune module correctly. You should not delete the
+auto-generated parts of it.
+
+Additionally, a Dune module can export CMake modules. A CMake module is a file that contains one or
+more build system macros meant for downstream use. If a Dune module provides such CMake modules, they can be found in
+the subfolder :code:`cmake/modules`. The module :code:`dune-foo/cmake/modules/DuneFooMacros.cmake` in a module
+:code:`dune-foo` is special, however: its contents are always executed when configuring the module
+:code:`dune-foo` or any other Dune module that requires or suggests :code:`dune-foo`.
+This is the perfect place to put your checks for external packages, see below.
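+
+For illustration, such a macros file often just runs the package checks the module needs; a minimal
+sketch (the external package is an arbitrary example) could look like this:
+
+.. code-block:: cmake
+
+    # dune-foo/cmake/modules/DuneFooMacros.cmake
+    # check for an external package needed by dune-foo and its downstream modules
+    find_package(ZLIB)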
+
+The file :code:`config.h.cmake` defines a template for the section of :code:`config.h` that is generated by the module.
+
+.. _flags:
+
+How do I modify the flags and linked libraries of a given target?
+=================================================================
+
+Again, there are multiple ways to do this. The Dune build system offers macros to make this task as
+easy as possible. For each external module, there is a macro :code:`add_dune_*_flags`. Those macros should
+cover most flags. Example usage:
+
+.. code-block:: cmake
+
+ add_executable(foo foo.cc)
+ add_dune_umfpack_flags(foo)
+ add_dune_mpi_flags(foo)
+
+There is also the macro :ref:`add_dune_all_flags`, which uses the same flag registry mechanism as the simplified
+build system in section :ref:`simplified`.
+
+If you want to fully control the configuration of the targets, you can do so. Build system entities such
+as targets, directories and tests have so-called properties in CMake. You can access and modify those
+properties via the commands :code:`get_property` and :code:`set_property`. You can for example use those
+to modify a target's :code:`COMPILE_DEFINITIONS` or :code:`INCLUDE_DIRECTORIES` property:
+
+.. code-block:: cmake
+
+ add_executable(foo foo.cc)
+ set_property(TARGET foo APPEND PROPERTY COMPILE_DEFINITIONS <somedefinition>)
+ set_property(TARGET foo APPEND PROPERTY INCLUDE_DIRECTORIES <somepath>)
+
+For a full list of properties, check the manual:
+
+.. code-block:: bash
+
+ cmake --help-property-list
+
+Linking libraries is best done through the :code:`target_link_libraries` command instead of manually
+tweaking properties.
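+
+A short sketch (the target and library names are examples):
+
+.. code-block:: cmake
+
+    add_executable(foo foo.cc)
+    target_link_libraries(foo PUBLIC dunecommon)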
+
+.. _external:
+
+How do I link against external libraries that are not checked for by Dune?
+===========================================================================
+
+While there might be many solutions that make your application work, there is only one clean solution to this: You have
+to provide a find module for the package. A find module is a CMake module that follows a specific naming scheme: For
+an external package called :code:`SomePackage` it is called :code:`FindSomePackage.cmake`. Note that CMake
+treats package names case-sensitively. If CMake encounters a :code:`find_package(SomePackage)` line, it searches
+its module include paths for this find module. A good read to get started writing a find module is
+`this page <http://www.cmake.org/Wiki/CMake:How_To_Find_Libraries>`_ in the CMake wiki.
+
+Depending on how common your external package is, you may not even need to write the find module on your own.
+You can have a look at the list of find modules shipped by CMake or simply search the
+internet for the module name and benefit from other open-source projects' work.
+
+It is considered good style to also provide a macro :code:`add_dune_somepackage_flags`.
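+
+As an illustration, a very small find module could look like the following sketch; the package,
+header and library names are made up, and real find modules usually need more logic:
+
+.. code-block:: cmake
+
+    # FindSomePackage.cmake -- hypothetical example
+    find_path(SomePackage_INCLUDE_DIR somepackage.h)
+    find_library(SomePackage_LIBRARY somepackage)
+
+    include(FindPackageHandleStandardArgs)
+    find_package_handle_standard_args(SomePackage
+      REQUIRED_VARS SomePackage_LIBRARY SomePackage_INCLUDE_DIR)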
+
+.. _outofsource:
+
+What is an out-of-source build?
+===============================
+
+An out-of-source build leaves the version-controlled source tree untouched and puts all files that are
+generated by the build process into a different directory -- the build directory. The build directory mirrors
+your source tree's structure, as seen in the following example. Assume this source directory structure:
+
+::
+
+ dune-foo/
+ CMakeLists.txt
+ dune/
+ foo/
+ CMakeLists.txt
+ src/
+ CMakeLists.txt
+
+The generated build directory will have the following structure, where the directory :code:`build-cmake`
+is a subdirectory of the source directory:
+
+::
+
+ build-cmake/
+ Makefile
+ dune/
+ foo/
+ Makefile
+ src/
+ Makefile
+
+Using the :code:`Unix Makefiles` generator, your Makefiles are generated in the build tree, so that is where you
+have to call :code:`make`. There are multiple advantages to this approach, such as a clear separation between
+version-controlled and generated files, and the possibility of having multiple out-of-source builds with different
+configurations at the same time.
+
+Out-of-source builds are the default with CMake. In-source builds are strongly discouraged.
+
+By default, a subfolder :code:`build-cmake` is generated within each dune module and is used as the build directory.
+You can customize this folder through the :code:`--builddir` option of :code:`dunecontrol`. If you give an absolute path to
+the :code:`--builddir` option, you will get something like this:
+
+::
+
+ build/
+ dune-common/
+ Makefile
+ dune-foo/
+ Makefile
+
+So, instead of one build directory in every dune module, you will be able to collect all build directories in one
+directory. This makes it much easier to have multiple build directories and to remove build directories.
+
+.. _simplified:
+
+What is the simplified build system and how do I use it?
+========================================================
+
+Dune offers a simplified build system, where all flags are added to all targets and all libraries are linked to all targets. You can enable the feature
+by calling :ref:`dune_enable_all_packages` in the top-level :code:`CMakeLists.txt` file of your project, before you add any subdirectories.
+
+This will modify all targets in the directory of the :code:`CMakeLists.txt` where you put this call, and also in all
+subdirectories. The compile flags for all found external packages are added to those targets and the targets are
+linked against all found external libraries.
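+
+In the top-level :code:`CMakeLists.txt` this boils down to a single call before the subdirectories
+are added (a sketch using the usual module layout):
+
+.. code-block:: cmake
+
+    dune_enable_all_packages()
+    add_subdirectory(dune)
+    add_subdirectory(src)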
+
+To use this with custom external packages, you have to register your flags with the mechanism;
+a short sketch follows the list below.
+Also, some special care has to be taken if your module builds one or more libraries which targets within the module link against.
+
+Carefully read the following documentation in those cases:
+
+* :ref:`dune_enable_all_packages`
+* :ref:`dune_register_package_flags`
+* :ref:`dune_library_add_sources`
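+
+A minimal registration sketch, assuming a hypothetical external package whose include directory and
+library have already been found (see the command reference for the full signature):
+
+.. code-block:: cmake
+
+    dune_register_package_flags(
+      COMPILE_DEFINITIONS "ENABLE_SOMEPACKAGE=1"
+      INCLUDE_DIRS ${SomePackage_INCLUDE_DIR}
+      LIBRARIES ${SomePackage_LIBRARY})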
+
+.. _compiler:
+
+How do I change my compiler and compiler flags?
+===============================================
+
+In general, there are multiple ways to do this:
+
+* Setting the CMake variables :ref:`CMAKE_<LANG>_COMPILER` (with :code:`LANG` being :code:`C`
+ or :code:`CXX`) from the opts file, e.g. via :code:`CMAKE_FLAGS="-DCMAKE_CXX_COMPILER=otherc++"`.
+* Setting those variables within the project with the :code:`set` command
+* Setting the environment variables :code:`CC`, :code:`CXX`, :code:`FC` etc.
+
+The first option is the recommended way. Whenever you change your compiler, you should delete all build
+directories. For some CMake versions, there is a known CMake bug that requires you to give an absolute path
+to your compiler, but Dune will issue a warning if you violate that.
+
+You can modify your default compiler flags by setting the variables
+:ref:`CMAKE_<LANG>_FLAGS` in your opts file (again with :code:`LANG` being :code:`C` or
+:code:`CXX`).
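+
+For example, an opts file entry selecting a different compiler and custom default flags might look
+like this (compiler path and flags are only examples):
+
+.. code-block:: bash
+
+    CMAKE_FLAGS="-DCMAKE_CXX_COMPILER=/usr/bin/clang++ -DCMAKE_CXX_FLAGS='-O3 -march=native'"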
+
+.. _symlink:
+
+How should I handle ini and grid files in an out-of-source-build setup?
+=======================================================================
+
+Such files are under version control, but they are needed in the build directory.
+There are some CMake functions targeting this issue:
+
+* :ref:`dune_symlink_to_source_tree`
+* :ref:`dune_symlink_to_source_files`
+* :ref:`dune_add_copy_command`
+* :ref:`dune_add_copy_dependency`
+* :ref:`dune_add_copy_target`
+
+The simplest way to solve the problem is to set the variable :ref:`DUNE_SYMLINK_TO_SOURCE_TREE` in your opts file.
+This will execute :ref:`dune_symlink_to_source_tree` in your top-level :code:`CMakeLists.txt`. This will add a symlink
+:code:`src_dir` to all subdirectories of the build directory, which points to the corresponding directory of the source
+tree. This will only work on platforms that support symlinking.
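+
+A possible opts file entry for this (sketch):
+
+.. code-block:: bash
+
+    CMAKE_FLAGS="-DDUNE_SYMLINK_TO_SOURCE_TREE=TRUE"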
+
+.. _ides:
+
+How do I use CMake with IDEs?
+=============================
+
+As already said, CMake is merely a build system generator with multiple backends (called generators). Using an IDE requires
+a different generator. Check :code:`cmake --help` for a list of generators. You can then add the :code:`-G` option to the :code:`CMAKE_FLAGS` in your opts file.
+Note that the generator name has to match character by character, including case and spaces.
+
+To configure highlighting of CMake errors in Emacs' compilation mode, include
+the following in your :code:`~/.emacs` (see the `Emacs bug
+<http://debbugs.gnu.org/cgi/bugreport.cgi?bug=22944>`_):
+
+.. code-block:: elisp
+
+ (setq compilation-error-regexp-alist-alist
+ `((cmake "^CMake \\(?:Error\\|\\(Warning\\)\\) at \\(.*\\):\\([1-9][0-9]*\\) ([^)]+):$"
+ 2 3 nil (1))
+ (cmake-info "^ \\(?: \\*\\)?\\(.*\\):\\([1-9][0-9]*\\) ([^)]+)$"
+ 2 3 nil 0)
+ . ,compilation-error-regexp-alist-alist))
+
+Then customize the option :code:`compilation-error-regexp-alist` and add the
+two predefined symbols :code:`cmake` and :code:`cmake-info` to the list.
+
+.. _cxxflags:
+
+I usually modify my CXXFLAGS upon calling make. How can I do this in CMake?
+===========================================================================
+
+This violates the CMake philosophy and there is no clean solution to achieve it. The CMake-ish solution would be
+to have one out-of-source build for each configuration. We have nevertheless implemented a workaround. It can be enabled
+by setting the variable :ref:`ALLOW_CXXFLAGS_OVERWRITE` in your opts file. You can then type:
+
+.. code-block:: bash
+
+ make CXXFLAGS="<your flags>" <target>
+
+Furthermore, any C preprocessor variable of the form :code:`-DVAR=<value>` can be overloaded on the command line
+and the grid type can be set via :code:`GRIDTYPE="<grid type>"`.
+
+Note that this only works with Makefile-based generators and requires several Unix tools, such as bash, to be
+available.
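+
+For example (grid type and target name are placeholders):
+
+.. code-block:: bash
+
+    make CXXFLAGS="-O3 -g" GRIDTYPE="YASPGRID" mytarget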
+
+.. _test:
+
+How do I run the test suite from CMake?
+=======================================
+
+The built-in target to run the tests is called :code:`test` instead of Autotools' :code:`check`.
+It is a mere wrapper around CMake's own testing tool CTest. You can check :code:`ctest --help`
+for a lot of useful options, such as choosing the set of tests to be run by matching regular expressions or
+showing the output of failed tests.
+
+The test programs are not built automatically. You need to build them manually with
+:code:`make build_tests` before running them.
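+
+A typical sequence from the build directory might look like this (the test name pattern is just an
+example):
+
+.. code-block:: bash
+
+    make build_tests
+    ctest -R fvector --output-on-failure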
+
+The Dune test suite also defines tests that run in parallel. You may set an upper bound to the number
+of cores in use for a single test by setting :ref:`DUNE_MAX_TEST_CORES`.
+
+.. _disable:
+
+Can I disable an external dependency?
+=====================================
+
+To disable an external dependency :code:`Foo`, add
+
+::
+
+ -DCMAKE_DISABLE_FIND_PACKAGE_Foo=TRUE
+
+to your opts file. The name of the dependency is case sensitive but there is no canonical naming
+scheme. See the output of configure to get the right name.
+
+Make sure to not use cached configure results by deleting the cache file or the build directory, cf.
+:ref:`troubleshoot`.
+
+.. _parallel:
+
+How do I switch between parallel and sequential builds?
+=======================================================
+
+Dune builds with CMake are parallel if and only if MPI is found. To have a sequential build despite an
+installed MPI library, you have to explicitly disable the corresponding find module by setting
+
+::
+
+ -DCMAKE_DISABLE_FIND_PACKAGE_MPI=TRUE
+
+in the :code:`CMAKE_FLAGS` of your opts file, as described in section :ref:`disable`.
+
+.. _headercheck:
+
+Why is it not possible anymore to do make headercheck?
+======================================================
+
+The headercheck feature has been disabled by default. You can enable it by setting the CMake variable :ref:`ENABLE_HEADERCHECK`
+through your opts file. This step was necessary because of the large number of additional files the headercheck adds to the
+build directory. A better implementation has not been found yet, because it simply does not fit the CMake philosophy.
+
+.. _packages:
+
+How do I create tarballs or packages?
+=====================================
+
+To create source code packages, also known as tarballs, run :code:`git archive` within your
+module's Git repository.
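+
+For example, to create a tarball from a tagged release (tag and module name are placeholders):
+
+.. code-block:: bash
+
+    git archive --prefix=dune-foo-2.8.0/ -o dune-foo-2.8.0.tar.gz v2.8.0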
+
+There is no default way to create binary packages like Deb or RPM packages. You can use
+the Open Build Service for openSUSE RPMs and related distributions, or create packages with
+the tools of the distribution of your choice, such as dpkg-buildpackage and debuild
+for Debian.
+
+CMake has a packaging tool CPack, but with CPack you are on your own. In the past, our
+results based on CPack were not satisfying.
+
+.. _dune-python:
+
+How does the Dune build system handle Python?
+=============================================
+
+dune-common contains a build system extension to handle many python-related aspects. You can
+read more on this in the module description :ref:`DunePythonCommonMacros` and the pieces of
+documentation mentioned therein.
+
+.. _troubleshoot:
+
+How do I troubleshoot?
+======================
+
+CMake caches aggressively, which makes it bad at recognizing changed configurations.
+To trigger a fresh run of configure, you can delete the :code:`CMakeCache.txt` file from
+the build directory and maybe save some compilation time afterward.
+
+Whenever you experience any problems, your first step should be to delete all build directories. Nice trick:
+
+::
+
+ dunecontrol exec "rm -rf build-cmake"
+
+This will remove all build directories from all DUNE modules.
+
+Later on you can get an error log from the file :code:`CMakeError.log` in the :code:`CMakeFiles`
+subdirectory of your build directory. This is what you should send to the mailing list, alongside a
+description of your setup and of what you have tried, to help us help you.
+
+Where can I get help?
+=====================
+
+The CMake manual is available on the command line:
+
+* :code:`cmake --help-command-list`
+* :code:`cmake --help-command <command>`
+* :code:`cmake --help-property-list`
+* :code:`cmake --help-property <property>`
+* :code:`cmake --help-module-list`
+* :code:`cmake --help-module <module>`
+
+To get help on which variables are picked up by CMake, there is a CMake wiki page collecting them.
+Of course, there is also Google, StackOverflow and the CMake Mailing list (archive).
+For problems specific to DUNE's build system, ask on our mailing lists.
--- /dev/null
+# Sample toolchain file for building for Windows from an Ubuntu Linux system.
+#
+# Typical usage:
+# *) install cross compiler: `sudo apt-get install mingw-w64 g++-mingw-w64`
+# *) cd build
+# *) cmake -DCMAKE_TOOLCHAIN_FILE=~/Toolchain-Ubuntu-mingw32.cmake ..
+
+set(CMAKE_SYSTEM_NAME Windows)
+set(TOOLCHAIN_PREFIX i686-w64-mingw32)
+
+# cross compilers to use for C and C++
+set(CMAKE_C_COMPILER ${TOOLCHAIN_PREFIX}-gcc-posix)
+set(CMAKE_CXX_COMPILER ${TOOLCHAIN_PREFIX}-g++-posix)
+set(CMAKE_Fortran_COMPILER ${TOOLCHAIN_PREFIX}-gfortran-posix)
+set(CMAKE_RC_COMPILER ${TOOLCHAIN_PREFIX}-windres)
+
+# enable to generate fully static binaries
+# set(CMAKE_EXE_LINKER_FLAGS "-static -static-libgcc -static-libstdc++" CACHE STRING "executable linker flags")
+
+# target environment on the build host system
+# set 1st to dir with the cross compiler's C/C++ headers/libs
+set(CMAKE_FIND_ROOT_PATH /usr/${TOOLCHAIN_PREFIX})
+
+# modify default behavior of FIND_XXX() commands to
+# search for headers/libs in the target environment and
+# search for programs in the build host environment
+set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
+
+# enable/disable some hardware specific feature
+set(THREADS_PTHREAD_ARG "-pthread")
+set(STDTHREAD_LINK_FLAGS "-pthread")
+set(STDTHREAD_WORKS true)
--- /dev/null
+BUILDDIR=build-mingw
+CMAKE_FLAGS="-DCMAKE_TOOLCHAIN_FILE=$PATH_dune_common/Toolchain-Ubuntu-mingw32.cmake"
--- /dev/null
+add_executable(poosc08 "poosc08.cc")
+target_link_libraries(poosc08 PUBLIC "dunecommon")
+
+add_executable(poosc08_test "poosc08_test.cc")
+target_link_libraries(poosc08_test PUBLIC "dunecommon")
+
+add_executable(indexset "indexset.cc")
+target_link_libraries(indexset PUBLIC "dunecommon")
+
+add_dune_mpi_flags("poosc08;poosc08_test;indexset")
+
+dune_add_latex_document(
+ SOURCE communication.tex
+ FATHER_TARGET doc
+ BUILD_ON_INSTALL
+ INSTALL ${CMAKE_INSTALL_DOCDIR}/comm)
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef BUILDINDEXSET_HH
+#define BUILDINDEXSET_HH
+
+
+#include <dune/common/parallel/indexset.hh>
+#include <dune/common/parallel/plocalindex.hh>
+
+/**
+ * @brief Flag for marking the indices.
+ */
+enum Flag {owner, overlap};
+
+// The type of local index we use
+typedef Dune::ParallelLocalIndex<Flag> LocalIndex;
+
+/**
+ * @brief Add indices to the example index set.
+ * @param indexSet The index set to build.
+ */
+template<class C, class TG, int N>
+void build(C& comm, Dune::ParallelIndexSet<TG,LocalIndex,N>& indexSet)
+{
+ // The rank of our process
+ int rank=comm.rank();
+
+ // Indicate that we add or remove indices.
+ indexSet.beginResize();
+
+ if(rank==0) {
+ indexSet.add(0, LocalIndex(0,overlap,true));
+ indexSet.add(2, LocalIndex(1,owner,true));
+ indexSet.add(6, LocalIndex(2,owner,true));
+ indexSet.add(3, LocalIndex(3,owner,true));
+ indexSet.add(5, LocalIndex(4,owner,true));
+ }
+
+ if(rank==1) {
+ indexSet.add(0, LocalIndex(0,owner,true));
+ indexSet.add(1, LocalIndex(1,owner,true));
+ indexSet.add(7, LocalIndex(2,owner,true));
+ indexSet.add(5, LocalIndex(3,overlap,true));
+ indexSet.add(4, LocalIndex(4,owner,true));
+ }
+
+ // Modification is over
+ indexSet.endResize();
+}
+#endif
--- /dev/null
+@InProceedings{ISTL,
+ author = {Markus Blatt and Peter Bastian},
+ title = {The Iterative Solver Template Library},
+ booktitle = {Applied Parallel Computing. State of the Art in Scientific Computing},
+ editor = {Bo K\r{a}gstr\"om and Erik Elmroth and Jack Dongarra and Jerzy Wa\'sniewski},
+ year = 2007,
+ volume = 4699,
+ series = {Lecture Notes in Computer Science},
+ publisher = {Springer},
+ pages = {666--675}
+}
+
+@Article{dune08-1,
+ author = {Peter Bastian and Markus Blatt and Andreas Dedner and Christian Engwer and Robert Kl\"ofkorn and Mario Ohlberger and Oliver Sander},
+ title = { A generic grid interface for parallel and adaptive scientific computing. Part I: abstract framework},
+ journal = {Computing},
+ year = 2008,
+ volume = 82,
+ number = {2--3},
+ pages = {103--119}
+}
+@Article{dune08-2,
+ author = {Peter Bastian and Markus Blatt and Andreas Dedner and Christian Engwer and Robert Kl\"ofkorn and Ralf Kornhuber and Mario Ohlberger and Oliver Sander},
+ title = { A generic grid interface for parallel and adaptive scientific computing. Part II: implementation and test in DUNE},
+ journal = {Computing},
+ year = 2008,
+ volume = 82,
+ number = {2--3} ,
+ pages = {121--138}
+}
+@Article{ISTLParallel,
+ author = {Markus Blatt and Peter Bastian},
+ title = {On the Generic Parallelisation of Iterative Solvers for
+ the Finite Element Method},
+ journal = {Int. J. Computational Science and
+ Engineering},
+ volume = {4},
+ number = {1},
+ pages = {56--69},
+ year = 2008
+}
+
+@Misc{DuneWeb,
+ author = {DUNE},
+ howpublished = {\texttt{http://www.dune-project.org/}}
+}
+@Misc{boost_mpi,
+ author = {D. Gregor and M. Troyer},
+ title = {{B}oost.{M}{P}{I}},
+ howpublished = {\texttt{http://www.boost.org/}},
+ year = 2006
+}
+
+@PhdThesis{gerlach02:janus,
+ author = {Jens Gerlach},
+ title = {Domain Engineering and Generic Programming for Parallel Scientific Computing},
+ school = {TU Berlin},
+ year = {2002}
+}
+
+@InProceedings{giloi95:_promot,
+ author = {W.K. Giloi and M. Kessler and A. Schramm},
+ title = {PROMOTER: A High Level, Object-Parallel Programming Language},
+ booktitle = {Proceedings of the International Conference on High Performance Computing},
+ year = {1995},
+ address = {New Dehli, India},
+ month = {December}
+}
+
+@inproceedings{nolte00:_taco,
+ author = {J\"{o}rg Nolte and Mitsuhisa Sato and Yutaka Ishikawa},
+ title = {TACO -- Dynamic Distributed Collections with Templates and Topologies},
+ booktitle = {Euro-Par '00: Proceedings from the 6th International Euro-Par Conference on Parallel Processing},
+ year = {2000},
+ isbn = {3-540-67956-1},
+ pages = {1071--1080},
+ publisher = {Springer-Verlag},
+ address = {London, UK},
+ }
\ No newline at end of file
--- /dev/null
+\documentclass[11pt]{article}
+\usepackage{multicol}
+\usepackage{ifthen}
+\usepackage{amsthm}
+\usepackage{amsmath}
+\usepackage{amsfonts}
+\usepackage{color}
+\usepackage{graphicx}
+\usepackage{hyperref}
+\usepackage{psfrag}
+\usepackage{subfigure}
+\usepackage[dvips]{epsfig}
+\usepackage[a4paper,body={148mm,240mm,nohead}]{geometry}
+\usepackage[ansinew]{inputenc}
+\usepackage{tikz}
+\usepackage{listings}
+\lstset{language=C++, basicstyle=\ttfamily,
+ stringstyle=\ttfamily, commentstyle=\it, extendedchars=true}
+
+\newtheorem{theorem}{Theorem}[section]
+\newtheorem{lemma}[theorem]{Lemma}
+
+\theoremstyle{definition}
+\newtheorem{definition}[theorem]{Definition}
+\newtheorem{class}[theorem]{Class}
+\newtheorem{algorithm}[theorem]{Algorithm}
+\theoremstyle{remark}
+\newtheorem{remark}[theorem]{Remark}
+
+\newcommand{\C}{\mathbb{C}}
+\newcommand{\R}{\mathbb{R}}
+\newcommand{\N}{\mathbb{N}}
+\newcommand{\Z}{\mathbb{Z}}
+\newcommand{\Q}{\mathbb{Q}}
+\newcommand{\K}{\mathbb{K}}
+\newcommand{\loc}{\mbox{loc}}
+
+\title{Communication within the Iterative Solver Template Library (ISTL)\thanks{Part of the
+ Distributed and Unified Numerics Environment (DUNE) which is
+ available from the site
+ \texttt{http://www.dune-project.org/}}}
+
+\author{%
+Markus Blatt\\
+Interdisziplinäres Zentrum für Wissenschaftliches Rechnen,\\
+Universität Heidelberg, Im Neuenheimer Feld 368, D-69120 Heidelberg, \\
+email: \texttt{Markus.Blatt@iwr.uni-heidelberg.de}}
+
+\date{April 27, 2005}
+
+\begin{document}
+
+\maketitle
+
+\begin{abstract}
+ This document describes the usage and interface of the classes meant for
+ setting up the communication within a parallel program using
+ ISTL. As most of the communication in a distributed program occurs in
+ the same pattern, it is often more efficient (and easier
+ for the programmer) to build the communication pattern once in the
+ program and then reuse it multiple times (e.~g. at each iteration step
+ of an iterative solver).
+\end{abstract}
+
+\begin{multicols}{2}
+{\small\tableofcontents}
+\end{multicols}
+
+
+\section{Introduction}
+\label{sec:introduction}
+
+When using the data parallel programming model a set of processes
+works collectively on the same set of finite data objects. These might
+be elements of a finite element grid or vector entries in a linear algebra
+computation. Each process works on a different partition of the global
+data and computes updated values only for this partition.
+
+In large scale parallel codes it is advisable to store the data
+partition in a local data structure directly in the local memory of
+the process. Due to data dependencies the process also needs to access
+data in the partitions of other processes. This can be done in two
+ways: either these values are communicated on demand between the
+processes whenever they are accessed, which results in data structures
+that are aware of the data distribution, or the partition of the
+process is augmented such that it additionally includes the data
+values that the local values depend on. Note that in the latter case
+the partitioning is no longer disjoint but overlapping. Of course, the
+values that other processes are responsible for computing need to be
+updated using communication at so-called synchronisation points of the
+algorithm.
+
+In the latter case the data structures do not need to know anything
+about the data distribution.
+This demands more effort from the parallel algorithm designer to make
+sure that the data used in computations is valid, i.e. that it contains an
+updated value whenever another process computes it. Still, it allows
+for fewer synchronisation points in the algorithms, as even in collective
+operations all input data may already have been updated by other processes
+due to a previous operation. Between the necessary synchronisation
+points one can take advantage of the fast local memory
+access.
+
+Consider representing a random access container $x$ on a set of
+processes ${\cal P}=\{0, \ldots, P-1\}$. It is represented by individual
+pieces $x_p$, where $x_p$ is the piece stored on
+process $p$ of the $P$ processes participating in the
+calculation. Although the global representation of the container is
+not available on any process, a process $p$ needs to know how the
+entries of its local piece $x_p$ correspond to the entries of the
+global container $x$, which would be used in a sequential program.
+
+\section{Communication Software Components}
+\label{sec:comm-softw-comp}
+
+From an abstract point of view a random access container $x: I
+\rightarrow K$ provides a
+mapping from an index set $I \subset \N_0$ onto a set of objects
+$K$. Note that we do not require $I$ to be consecutive. The piece
+$x_p$ of the container $x$ stored on process $p$ is a mapping $x_p:I_p
+\rightarrow K$, where $I_p \subset I$. Due to efficiency the entries
+of $x_p$ should be stored consecutively in memory.
+This means that for the local computation the data must be addressable
+by a consecutive index starting from $0$.
+
+When using adaptive
+discretisation methods it may become necessary to reorder the indices
+after adding and/or deleting some of the discretisation
+points. Therefore this index does not need to be persistent
+and can easily be changed. We will call this index {\em\index{local index}local index}.
+
+For the communication phases of our algorithms these locally stored
+entries must also be addressable by a global identifier. It is used to
+store the received values at, and to retrieve the values to be sent from,
+the correct local position in the consecutive memory chunk. To ease the
+addition and removal of discretisation points this global identifier has
+to be persistent but does not need to be consecutive. We
+will call this global identifier {\em\index{global index}global index}.
+
+\subsection{ParallelIndexSet}
+ Let $I \subset \N_0$ be an arbitrary, not necessarily consecutive,
+ index set identifying all discretisation points of the computation.
+ Furthermore, let
+ $$({I}_p)_{p\in {\cal P}}, \quad
+ \bigcup\limits_{p \in {\cal P}} {I}_p = I$$ be an overlapping decomposition of the global index set
+ $I$ into the sets of indices ${I}_p$ corresponding to the
+ global indices of the values stored locally in the chunk of process $p$.
+
+ Then the class
+ \begin{lstlisting}{}
+ template<typename TG, typename TL> class ParallelIndexSet;
+ \end{lstlisting}
+ realises the one to one mapping
+ $$
+ \gamma_p\::\: {I}_p \longrightarrow {I}^{\loc}_p := [0, n_p)
+ $$
+ of the globally unique index onto the local index.
+
+ The template parameter \lstinline!TG! is the type of the global
+ index and
+ \lstinline!TL! is the type of the local index. The only prerequisite
+ of \lstinline!TG! is that objects of this type are comparable using
+ the less-than-operator \lstinline!<!. Note that this prerequisite
+ still allows attaching further
+ information to the global index or even using this information as
+ the global index. The type \lstinline!TL! has to
+ be convertible to \lstinline!std::size_t! as it is used to address array
+ elements.
+
+ The pairs of global and local indices are
+ ordered by ascending global index. It is possible to access the pairs via
+\lstinline!operator[](TG& global)! in $\log(n)$ time, where $n$ is the
+number of pairs in the set. In efficient code it is advisable to
+access the index pairs using the provided iterators instead.
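+
+For instance, iterating over all index pairs could look roughly like
+this (a sketch, assuming an instance \lstinline!indexSet! of type
+\lstinline!Dune::ParallelIndexSet<int,LocalIndex>!, where
+\lstinline!LocalIndex! is a suitable local index type such as the
+\lstinline!ParallelLocalIndex! described in the next subsection):
+\begin{lstlisting}
+  typedef Dune::ParallelIndexSet<int,LocalIndex> PIS;
+  for(PIS::const_iterator pair = indexSet.begin();
+      pair != indexSet.end(); ++pair)
+    std::cout << pair->global() << " -> "
+              << pair->local().local() << std::endl;
+\end{lstlisting}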
+
+Due to the ordering, the index set can only be changed, i.e. index pairs
+added or deleted, in a special resize phase. By calling the functions
+\lstinline!beginResize()! and \lstinline!endResize()! the programmer
+indicates that the resize phase starts and ends, respectively. During
+the call of \lstinline!endResize()! the deleted indices will be
+removed and the added index pairs will be sorted and merged with the existing
+ones.
+
+\subsection{ParallelLocalIndex}
+When dealing with overlapping index sets in distributed computing
+there often is the need to distinguish different partitions of an index
+set.%, e.g. $I_i$ and $\tilde{I}_i\setminus I_i$ as introduced in Section \ref{sec:domain_decomposition}.
+
+This is accomplished by using the class
+\begin{lstlisting}{}
+ template<typename TA> class ParallelLocalIndex;
+\end{lstlisting}
+as the type for the local index of class \lstinline!ParallelIndexSet!.
+Here the template parameter \lstinline!TA! is the type of the
+attributes used, e.g. an enumeration \lstinline!Flags! defined by
+\begin{lstlisting}
+ enum Flags {owner, ghost};
+\end{lstlisting}
+where
+\lstinline!owner! marks the indices $k \in I_p$ owned by process
+$p$ and \lstinline!ghost! the indices $k\not\in I_p$ owned
+by other processes.
+
+As an example let us look at an array distributed between two
+processes. In Figure \ref{fig:redistarray} one can see the array
+$a$ as it appears in a sequential program. Below there are two
+different distributions of that array. The local views $s_0$ and
+$s_1$ are the parts process $0$ and $1$ store in the case that $a$ is
+divided into two
+blocks. The local views $t_0$ and $t_1$ are the parts of $a$ that
+process $0$ and $1$ store in the case that $a$ is divided into four
+blocks and process
+$0$ stores the first and third block and process $1$ the second and
+fourth block. The decompositions have an overlap of one and the indices have
+the attributes \lstinline!owner! and \lstinline!ghost! visualised by
+white and shaded cells, respectively.
+The index sets $I_s$ and $I_t$ corresponding to the decompositions $s_p$
+and $t_p$, $p \in \{0,1\}$, are shown in Figure \ref{fig:redistindex} as sets of triples
+$(g,l,a)$. Here $g$ is the global index, $l$ is the local index and
+$a$ is the attribute (either o for \lstinline!owner! or g
+for \lstinline!ghost!).
+\begin{figure}
+ \centering
+ \begin{tikzpicture}
+ \draw (0,3.3) ellipse (2.2cm and 1.5cm) node [align=center,yshift=0.2cm] {$I_s$\\(0,0,o) (1,1,o)\\(2,2,o) (3,3,o) (4,4,o)\\(5,5,o) (6,6,g)};
+ \draw (0,0) ellipse (2.2cm and 1.5cm) node [align=center,yshift=0.2cm] {$I_t$\\(0,0,o) (1,1,o) (2,2,o)\\(3,3,g) (5,4,g) (6,5,o)\\(7,6,o) (8,7,o) (9,8,g)};
+ \draw (2.5,-2.7) -- ++(0,7.5);
+ \draw (5,3.3) ellipse (2.2cm and 1.5cm) node [align=center,yshift=0.2cm] {$I_s$\\(5,0,g) (6,1,o)\\(7,2,o) (8,3,o) (9,4,o)\\(10,5,o) (11,6,o)};
+ \draw (5,0) ellipse (2.2cm and 1.5cm) node [align=center,yshift=0.2cm] {$I_t$\\(2,0,g) (3,1,o) (4,2,o)\\(5,3,o) (6,4,g) (8,5,g)\\(9,6,o) (10,7,o) (11,8,o)};
+ \node at (0,-2.2) {Processor 0};
+ \node at (5,-2.2) {Processor 1};
+ \end{tikzpicture}
+ \caption{Index sets for array redistribution}
+ \label{fig:redistindex}
+\end{figure}
+\begin{figure*}
+ \tikzset{
+ box/.style={
+ draw,
+ shape=rectangle,
+ minimum width=1.5em,
+ minimum height=1.5em,
+ anchor=base,
+ inner sep=0pt,
+ },
+ overlap/.style={fill=black!25!white},
+ }
+ \newcommand{\interior}[1]{%
+ \tikz[baseline={(0,0)}]\node[box]{#1};%
+ }
+ \newcommand{\overlap}[1]{%
+ \tikz[baseline={(0,0)}]\node[box,overlap]{#1};%
+ }
+ \def\mc{\multicolumn}%
+ \newcommand{\leader}[1]{\makebox[2em][r]{#1 }}%
+ \renewcommand{\arraystretch}{2}%
+ \centering
+ \begin{tabular}{r|l}
+ \mc2c{global array} \\
+ \mc2c{\leader{a:}%
+ \foreach\i in {0,...,11} {\interior{\i}}} \\
+ \mc2c{local views} \\
+ \leader{$s_0$:}%
+ \foreach\i in {0,...,5} {\interior{\i}}%
+ \foreach\i in {6} {\overlap {\i}} &
+ \leader{$s_1$:}%
+ \foreach\i in {5} {\overlap {\i}}%
+ \foreach\i in {6,...,11} {\interior{\i}} \\
+ \leader{$t_0$:}%
+ \foreach\i in {0,1,2} {\interior{\i}}%
+ \foreach\i in {3,5} {\overlap {\i}}%
+ \foreach\i in {6,7,8} {\interior{\i}}%
+ \foreach\i in {9} {\overlap {\i}} &
+ \leader{$t_1$:}%
+ \foreach\i in {2} {\overlap {\i}}%
+ \foreach\i in {3,4,5} {\interior{\i}}%
+ \foreach\i in {6,8} {\overlap {\i}}%
+ \foreach\i in {9,10,11} {\interior{\i}} \\
+ \mc1c{Processor 0} & \mc1{|c}{Processor 1} \\
+ \end{tabular}
+ \caption{Redistributed array}
+ \label{fig:redistarray}
+\end{figure*}
+
+The following code snippet demonstrates how to set up the index set
+$I_s$ on process $0$:
+\lstinputlisting[linerange={53-57,59-61,67-67}]{poosc08_test.cc}
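+
+As the listing refers to an external file, a rough sketch of such a
+setup (not the file itself; it uses the types introduced above and the
+index pairs of Figure \ref{fig:redistindex}) could read:
+\begin{lstlisting}
+  typedef Dune::ParallelLocalIndex<Flags>        LocalIndex;
+  Dune::ParallelIndexSet<int,LocalIndex> sis;
+
+  sis.beginResize();
+  for(int i=0; i<=5; ++i)                  // pairs (0,0,o) ... (5,5,o)
+    sis.add(i, LocalIndex(i, owner, true));
+  sis.add(6, LocalIndex(6, ghost, true));  // pair (6,6,g)
+  sis.endResize();
+\end{lstlisting}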
+\subsection{Remote Indices}
+\label{sec:remote-indices}
+
+To set up communication between the processes every process needs to
+know which indices are also known to other processes and which
+attributes are attached to them on the remote side.
+There are scenarios where data is exchanged between different
+index sets, e.g. if the data is agglomerated on fewer processes or
+redistributed. Therefore communication is allowed to occur between different
+decompositions of the same index set.
+
+
+Let $I \subset \N$ be the global index set and
+$$
+(I^s_p)_{p\in{\cal P}},\quad \bigcup_{p\in{\cal P}} I^s_p = I,\quad
+\text{ and } \quad
+(I^t_p)_{p\in{\cal P}}, \quad\bigcup_{p\in{\cal P}} I^t_p = I
+$$ be two overlapping
+decompositions of the same index set $I$. Then an instance of class
+\lstinline!RemoteIndices! on process $p \in {\cal P}$
+stores the sets of triples
+\begin{equation}
+ \label{eq:ri_s_set}
+ \begin{split}
+ r_{p \rightarrow q}^{s} = \{ (g,(l,a),b) \,|\, g \in I^s_p \wedge g \in I_q^t,
+l=\gamma_p^s(g), a = \alpha_p^s(l), b =
+\alpha_q^t(\gamma_q^t(g))\}
+\end{split}
+\end{equation}
+and
+\begin{equation}
+ \label{eq:ri_t_set}
+ \begin{split}
+ r_{p \rightarrow q}^{t} = \{ (g,(l,a),b) \,|\, g \in I^s_q \wedge g \in I_p^t,
+ l=\gamma_p^t(g), a = \alpha_p^t(l), b =
+ \alpha_q^s(\gamma_q^s(g))\}\,,
+ \end{split}
+\end{equation}
+for all $q\in{\cal P}$.
+Here $\alpha^s_p$ and $\alpha^t_p$ denote the mapping of local
+indices on process $p$ onto attributes for the index set $I^s_p$ and
+$I^t_p$ as realised by \lstinline!ParallelLocalIndex!.
+Note that the sets $r_{p \rightarrow q}^{s}$ and $r_{p \rightarrow
+ q}^{t}$ will only be nonempty if the processes $p$ and $q$ manage
+overlapping index sets.
+
+For our example in Figure \ref{fig:redistarray} and Figure
+\ref{fig:redistindex} the interface between $I_s$ and $I_t$ on process
+$0$ is:
+\begin{align*}
+ r_{0\rightarrow 0}^{s} = \{&(0,(0,o),o), (1,(1,o),o), (2,(2,o),o),
+ (3,(3,o),g), (5,(5,o),g), (6,(6,g),o)\}\\
+ r_{0\rightarrow 0}^{t} = \{&(0,(0,o),o), (1,(1,o),o), (2,(2,o),o),
+ (3,(3,g),o), (5,(4,g),o), (6,(5,o),g)\}\\
+ r_{0\rightarrow 1}^{s} = \{&(2,(2,o),g), (3,(3,o),o), (4,(4,o),o),
+ (5,(5,o),o), (6,(6,g),g)\}\\
+ r_{0\rightarrow 1}^{t} = \{&(5,(4,g),g), (6,(5,o),o), (7,(6,o),o),
+ (8,(7,o),o), (9,(8,g),o)\}
+\end{align*}
+This information can either be calculated automatically by
+communicating all indices in a ring or set up by hand if the user has
+this information available. Assuming that \lstinline!sis! is the index set
+$I_s$ and \lstinline!tis! the index set $I_t$ set up as described in
+the previous subsection and \lstinline!comm! is an MPI communicator
+then the simple call
+\lstinputlisting[linerange={83-84}]{poosc08_test.cc}
+on all processes automatically calculates this information and
+stores it in \lstinline!riRedist!. For a
+parallel calculation on the local views $s_0$ and $s_1$ calling
+\lstinputlisting[linerange={86-87}]{poosc08_test.cc}
+on all processes builds the necessary information in \lstinline!riS!.
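+
+Since the referenced listings are external, the two calls might look
+roughly as follows (a sketch; the exact constructor signature of
+\lstinline!RemoteIndices! may differ between versions):
+\begin{lstlisting}
+  typedef Dune::ParallelLocalIndex<Flags>        LocalIndex;
+  typedef Dune::ParallelIndexSet<int,LocalIndex> PIS;
+
+  // remote index information between the decompositions I_s and I_t
+  Dune::RemoteIndices<PIS> riRedist(sis, tis, comm);
+  riRedist.rebuild<true>();  // communicate all indices in a ring
+
+  // remote index information within the decomposition I_s itself
+  Dune::RemoteIndices<PIS> riS(sis, sis, comm);
+  riS.rebuild<true>();
+\end{lstlisting}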
+
+\subsection{Communication Interface}
+\label{sec:comm-interf}
+
+With the information provided by class \lstinline!RemoteIndices! the
+user can set up arbitrary communication interfaces. These interfaces
+are realised in \lstinline!template<typename T> class Interface!,
+where the template parameter \lstinline!T! is the custom type of the
+\lstinline!ParallelIndexSet! representing the index sets.
+Using the attributes attached to the indices by
+\lstinline!ParallelLocalIndex! the user can select subsets of the
+indices for exchanging data, e.g. send data from indices marked
+as \lstinline!owner! to indices marked as \lstinline!ghost!.
+
+Basically the interface on process $p$ manages two sets for each
+process $q$ it shares common indices with:
+
+$$
+i_{p\rightarrow q}^{s} = \{ l \,|\, (g,(l,a),b) \in r_{p\rightarrow q}^{s},
+a \in A_s \wedge b \in A_t\}
+$$
+and
+$$
+i_{p\rightarrow q}^{t} = \{ l \,|\, (g, (l,a), b) \in r_{p\rightarrow q}^{t},
+a \in A_t \wedge b \in A_s\}\,,
+$$
+where $A_s$ and $A_t$ are the attributes marking the indices where the
+source and target of the communication will be, respectively.
+
+In our example, with $A_s=\{o\}$ and $A_t=\{o, g\}$, the following sets
+will be stored on process $0$:
+\begin{align*}
+ i_{0\rightarrow 0}^{s} = \{0, 1, 3, 5\}\quad & \quad
+ i_{0\rightarrow 0}^{t} = \{0, 1, 3, 4\}\\
+ i_{0\rightarrow 1}^{s} = \{2, 3, 4, 5\}\quad & \quad
+ i_{0\rightarrow 1}^{t} = \{5, 6, 7, 8\}\,.
+\end{align*}
+
+The following code snippet would build the interface above in
+\lstinline!infRedist! as well as the interface \lstinline!infS!
+to communicate between
+indices marked as \lstinline!owner! and \lstinline!ghost! on the local
+array views $s_0$ and $s_1$:
+\lstinputlisting[linerange={89-97}]{poosc08_test.cc}
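+
+A corresponding sketch (with the attribute sets chosen as $A_s=\{o\}$
+and $A_t=\{o,g\}$ for the redistribution interface; the attribute set
+classes are assumed to come from \lstinline!dune/common/enumset.hh!)
+might read:
+\begin{lstlisting}
+  Dune::EnumItem<Flags,owner> ownerFlags;  // A_s = {o}
+  Dune::AllSet<Flags>         allFlags;    // A_t = {o,g}
+
+  Dune::Interface infRedist;
+  infRedist.build(riRedist, ownerFlags, allFlags);
+
+  Dune::Interface infS;  // owner -> ghost on the local views s_i
+  infS.build(riS, ownerFlags, Dune::EnumItem<Flags,ghost>());
+\end{lstlisting}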
+
+\subsection{Communicator}
+\label{sec:communicator}
+
+Using the classes from the previous sections all information about the
+communication is available and we are set to communicate data values
+of arbitrary
+container types. The only prerequisite for the container type is that
+its values are addressable via \lstinline!operator[](size_t index)!.
+This should be safe to assume.
+
+An important feature of our communicators is that they are able to send
+not only one data item per index, but also a varying number of data
+elements (of the same type) for each index. This is
+supported in a generic way by the traits class
+\lstinline!template<class V> struct CommPolicy!
+describing the container type \lstinline!V!. The
+\lstinline!typedef IndexedType! is the atomic type to be communicated and
+\lstinline!typedef IndexedTypeFlag! is either \lstinline!SizeOne! if
+there is only one data item per index or \lstinline!VariableSize! if the
+number of data items per index is variable.
+
+The default implementation works for all
+array-like containers which provide only one data item per index. For all
+other containers the user has to provide their own custom
+specialisation.
+%For the vector classes of ISTL (up to two block levels)
+%those specialisations are already implemented.
+
+The class \lstinline!template<class T> class BufferedCommunicator!
+performs the
+actual communication. The template parameter \lstinline!T! describes
+the type of the parallel index set.
+It uses the information about the communication interface provided by
+an object of class \lstinline!Interface! to set up communication
+buffers for a container containing a specific data type. It is also
+responsible for gathering the data before and scattering the data
+after the communication step. The strict separation of the interface
+description from the actual buffering and communication allows for
+reusing the interface information with various different container and
+data types.
+
+Before the communication can start one has to call the
+\lstinline!build! method with the data source and target containers as
+well as the communication interface as arguments. Assuming
+\lstinline!s! and \lstinline!t! are the arrays $s_i$ and $t_i$,
+respectively, then
+\lstinputlisting[linerange=103-106]{poosc08_test.cc}
+demonstrates how to set up the communicator \lstinline!bCommRedist! for the array
+redistribution and \lstinline!bComm! for a parallel calculation on the
+local views $s_i$. The
+\lstinline!build! function
+calculates the size of the messages to send to other processes and
+allocates buffers for the send and receive actions. The
+representatives \lstinline!s! and \lstinline!t! are
+needed to get the number of data values at each index in the case of
+variable numbers of data items per index. Note that, due to the generic
+programming techniques used, the compiler knows if the number of data
+points is constant for each index and will apply a specialised
+algorithm for calculating the message size without querying either
+\lstinline!s! or \lstinline!t!. Clean-up of allocated
+resources is done either by calling the method \lstinline!free()! or
+automatically in the destructor.
+
+The actual communication takes place when one of the methods
+\lstinline!forward!
+or \lstinline!backward! is called. In our case in
+\lstinline!bCommRedist! the \lstinline!forward! method
+sends data from the local views $s_i$ to the local views $t_i$
+according to the interface information, and the \lstinline!backward!
+method sends data in the opposite direction.
+
+The following code snippet first redistributes the local views $s_i$
+of the global array to the local views $t_i$ and
+performs some calculation on this representation. Afterwards the
+result is communicated backwards.
+\lstinputlisting[linerange=110-113]{poosc08_test.cc}
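+
+A rough sketch of such a sequence (not the referenced file itself; it
+assumes \lstinline!bCommRedist! has been built as described above, and
+\lstinline!CopyData! and \lstinline!AddData! are the gather/scatter
+policies discussed below) could read:
+\begin{lstlisting}
+  bCommRedist.forward<CopyData>(s, t);  // copy the values of s_i into t_i
+  // ... compute on the redistributed representation t ...
+  bCommRedist.backward<AddData>(s, t);  // add the results back into s_i
+\end{lstlisting}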
+
+Note that both methods have a different template parameter, either
+\lstinline!CopyData! or \lstinline!AddData!. These are policies for
+gathering and scattering the data items. The former just copies
+the data from and to the location. The latter copies from the source
+location but adds the received data items to the target
+entries. Assuming our data is stored in simple C-arrays
+\lstinline!AddData! could be implemented like this:
+
+\lstinputlisting[linerange=16-27]{poosc08_test.cc}
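+
+For readers without the accompanying source file, such a policy might
+look roughly as follows (a sketch, assuming the data lives in plain
+\lstinline!double! arrays; the matching \lstinline!CommPolicy!
+specialisation is omitted):
+\begin{lstlisting}
+  struct AddData {
+    typedef double IndexedType;
+
+    static double gather(const double* array, std::size_t index)
+    { return array[index]; }
+
+    static void scatter(double* array, double value, std::size_t index)
+    { array[index] += value; }  // accumulate instead of overwriting
+  };
+\end{lstlisting}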
+
+Note that arbitrary
+manipulations can be applied to the communicated data in both methods.
+
+For containers with multiple data items associated with one index
+the methods \lstinline!gather! and \lstinline!scatter! must have an additional
+integer argument specifying the sub-index.
+
+\section{Collective Communication}
+\label{sec:collective-communication}
+
+While communicating entries of array-like structures is a prominent
+task in scientific computing codes, one must not neglect
+collective communication operations, like gathering and scattering data
+ from and to all processes, respectively, or waiting for other processes. An
+abstraction for these operations is crucial for decoupling the
+communication from the parallel programming paradigm used.
+
+Therefore we designed
+\lstinline!template<class T> class CollectiveCommunication! which provides
+information about the underlying parallel programming paradigm as well as
+the collective communication operations as known from MPI. See Table
+\ref{tab:col-comm} for a list of all functions.
+
+\begin{table*}%[b]
+ \centering
+ \begin{tabular}{p{.5\textwidth}|p{.4\textwidth}}
+ Function&Description\\\hline\hline
+ \lstinline!int rank()!&Get the rank of the process\\
+ \lstinline!int size()!&Get the number of processes\\
+ \lstinline!template<typename T> T sum (T& in)!& Compute global
+ sum\\
+ \lstinline!template<typename T> T prod (T& in)!&Compute global
+ product\\
+ \lstinline!template<typename T> T min (T& in)!&Compute global minimum\\
+ \lstinline!template<typename T> T max (T& in)!&Compute global
+ maximum\\
+ \lstinline!void barrier()!& Wait for all processes.\\
+ \lstinline!template<typename T> int broadcast (T* inout, int len, int root)!
+& Broadcast an array from root to all other processes\\
+\lstinline!template<typename T> int gather (T* in, T* out, int len, int root)!&
+Gather arrays at a root process\\
+\lstinline!template<typename BinaryFunction, typename Type> int allreduce(Type* in, Type* out, int len)!&
+Combine values from all processes on all processes. The combine function
+is given by \lstinline!BinaryFunction!
+ \end{tabular}
+ \caption{Collective Communication Functions}
+ \label{tab:col-comm}
+\end{table*}
+
+Currently there is a default implementation for sequential programs
+as well as a specialisation working with MPI. This approach allows for
+running parallel programs sequentially without any parallel overhead
+simply by choosing the sequential specialisation at compile time.
+Note that the interface is far more convenient to use than the C++
+interface of MPI. The latter is a simple wrapper around the C
+implementation without taking advantage of the power of generic programming.
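+
+As a brief illustration (a sketch, assuming MPI has already been
+initialised, e.~g. via \lstinline!MPIHelper!; header and namespace
+details may differ between DUNE versions), the class can be used as
+follows:
+\begin{lstlisting}
+  Dune::CollectiveCommunication<MPI_Comm> cc(MPI_COMM_WORLD);
+  double local = 1.0;            // some locally computed contribution
+  double total = cc.sum(local);  // global sum, available on all processes
+  if(cc.rank() == 0)
+    std::cout << "sum of " << cc.size() << " values: " << total << std::endl;
+  cc.barrier();                  // wait for all processes
+\end{lstlisting}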
+
+
+The collective communication classes were developed before the release
+of Boost.MPI \cite{boost_mpi}. In contrast to Boost.MPI they were never
+meant as a full generic implementation of all MPI functions. Instead they
+are restricted to the most basic subset of collective operations needed
+to implement finite element methods and iterative solvers using the
+previously described components. This lean interface should make it
+possible to easily port this approach to
+thread-based parallelisation as well as other parallelisation
+paradigms. This would allow code to easily switch between different paradigms.
+
+
+\bibliographystyle{plainnat}
+\bibliography{communication}
+\end{document}
--- /dev/null
+%!PS-Adobe-2.0 EPSF-2.0
+%%Title: ../eps/darray.eps
+%%Creator: fig2dev Version 3.2 Patchlevel 1
+%%CreationDate: Thu Mar 4 15:25:17 1999
+%%For: peter@speedo (Peter Bastian)
+%%Orientation: Portrait
+%%BoundingBox: 0 0 345 260
+%%Pages: 0
+%%BeginSetup
+%%EndSetup
+%%Magnification: 1.0000
+%%EndComments
+/$F2psDict 200 dict def
+$F2psDict begin
+$F2psDict /mtrx matrix put
+/col-1 {0 setgray} bind def
+/col0 {0.000 0.000 0.000 srgb} bind def
+/col1 {0.000 0.000 1.000 srgb} bind def
+/col2 {0.000 1.000 0.000 srgb} bind def
+/col3 {0.000 1.000 1.000 srgb} bind def
+/col4 {1.000 0.000 0.000 srgb} bind def
+/col5 {1.000 0.000 1.000 srgb} bind def
+/col6 {1.000 1.000 0.000 srgb} bind def
+/col7 {1.000 1.000 1.000 srgb} bind def
+/col8 {0.000 0.000 0.560 srgb} bind def
+/col9 {0.000 0.000 0.690 srgb} bind def
+/col10 {0.000 0.000 0.820 srgb} bind def
+/col11 {0.530 0.810 1.000 srgb} bind def
+/col12 {0.000 0.560 0.000 srgb} bind def
+/col13 {0.000 0.690 0.000 srgb} bind def
+/col14 {0.000 0.820 0.000 srgb} bind def
+/col15 {0.000 0.560 0.560 srgb} bind def
+/col16 {0.000 0.690 0.690 srgb} bind def
+/col17 {0.000 0.820 0.820 srgb} bind def
+/col18 {0.560 0.000 0.000 srgb} bind def
+/col19 {0.690 0.000 0.000 srgb} bind def
+/col20 {0.820 0.000 0.000 srgb} bind def
+/col21 {0.560 0.000 0.560 srgb} bind def
+/col22 {0.690 0.000 0.690 srgb} bind def
+/col23 {0.820 0.000 0.820 srgb} bind def
+/col24 {0.500 0.190 0.000 srgb} bind def
+/col25 {0.630 0.250 0.000 srgb} bind def
+/col26 {0.750 0.380 0.000 srgb} bind def
+/col27 {1.000 0.500 0.500 srgb} bind def
+/col28 {1.000 0.630 0.630 srgb} bind def
+/col29 {1.000 0.750 0.750 srgb} bind def
+/col30 {1.000 0.880 0.880 srgb} bind def
+/col31 {1.000 0.840 0.000 srgb} bind def
+
+end
+save
+-130.0 296.0 translate
+1 -1 scale
+
+/cp {closepath} bind def
+/ef {eofill} bind def
+/gr {grestore} bind def
+/gs {gsave} bind def
+/sa {save} bind def
+/rs {restore} bind def
+/l {lineto} bind def
+/m {moveto} bind def
+/rm {rmoveto} bind def
+/n {newpath} bind def
+/s {stroke} bind def
+/sh {show} bind def
+/slc {setlinecap} bind def
+/slj {setlinejoin} bind def
+/slw {setlinewidth} bind def
+/srgb {setrgbcolor} bind def
+/rot {rotate} bind def
+/sc {scale} bind def
+/sd {setdash} bind def
+/ff {findfont} bind def
+/sf {setfont} bind def
+/scf {scalefont} bind def
+/sw {stringwidth} bind def
+/tr {translate} bind def
+/tnt {dup dup currentrgbcolor
+ 4 -2 roll dup 1 exch sub 3 -1 roll mul add
+ 4 -2 roll dup 1 exch sub 3 -1 roll mul add
+ 4 -2 roll dup 1 exch sub 3 -1 roll mul add srgb}
+ bind def
+/shd {dup dup currentrgbcolor 4 -2 roll mul 4 -2 roll mul
+ 4 -2 roll mul srgb} bind def
+/$F2psBegin {$F2psDict begin /$F2psEnteredState save def} def
+/$F2psEnd {$F2psEnteredState restore end} def
+%%EndProlog
+
+$F2psBegin
+10 setmiterlimit
+n -1000 5689 m -1000 -1000 l 8527 -1000 l 8527 5689 l cp clip
+ 0.06299 0.06299 sc
+% Polyline
+7.500 slw
+n 5265 3105 m 7515 3105 l 7515 3780 l 5265 3780 l cp gs col0 s gr
+% Polyline
+n 5670 3105 m 5670 3780 l gs col0 s gr
+% Polyline
+n 6120 3105 m 6120 3780 l gs col0 s gr
+% Polyline
+n 6570 3105 m 6570 3780 l gs col0 s gr
+% Polyline
+n 7020 3105 m 7020 3780 l gs col0 s gr
+/Times-Roman ff 180.00 scf sf
+5400 4050 m
+gs 1 -1 sc (0) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+6300 4050 m
+gs 1 -1 sc (2) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+6750 4050 m
+gs 1 -1 sc (3) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+7245 4050 m
+gs 1 -1 sc (4) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+5850 4050 m
+gs 1 -1 sc (1) col0 sh gr
+% Polyline
+n 2340 3105 m 4590 3105 l 4590 3780 l 2340 3780 l cp gs col0 s gr
+% Polyline
+n 2745 3105 m 2745 3780 l gs col0 s gr
+% Polyline
+n 3195 3105 m 3195 3780 l gs col0 s gr
+% Polyline
+n 3645 3105 m 3645 3780 l gs col0 s gr
+% Polyline
+n 4095 3105 m 4095 3780 l gs col0 s gr
+/Times-Roman ff 180.00 scf sf
+2475 4050 m
+gs 1 -1 sc (0) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+3375 4050 m
+gs 1 -1 sc (2) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+3825 4050 m
+gs 1 -1 sc (3) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+4320 4050 m
+gs 1 -1 sc (4) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+2925 4050 m
+gs 1 -1 sc (1) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+2970 4320 m
+gs 1 -1 sc (local indices) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+2475 4635 m
+gs 1 -1 sc (local array in processor 0) col0 sh gr
+% Polyline
+n 3150 1215 m 6750 1215 l 6750 1890 l 3150 1890 l cp gs col0 s gr
+% Polyline
+n 4950 1215 m 4950 1890 l gs col0 s gr
+% Polyline
+n 4050 1215 m 4050 1890 l gs col0 s gr
+% Polyline
+n 3600 1215 m 3600 1890 l gs col0 s gr
+% Polyline
+n 4500 1215 m 4500 1890 l gs col0 s gr
+% Polyline
+n 5850 1215 m 5850 1890 l gs col0 s gr
+% Polyline
+n 5400 1215 m 5400 1890 l gs col0 s gr
+% Polyline
+n 6300 1215 m 6300 1890 l gs col0 s gr
+% Polyline
+n 2520 3105 m 3375 1890 l gs col0 s gr
+% Polyline
+n 2970 3105 m 4230 1890 l gs col0 s gr
+% Polyline
+n 3375 3105 m 6030 1890 l gs col0 s gr
+% Polyline
+n 3825 3105 m 4725 1890 l gs col0 s gr
+% Polyline
+n 5490 3105 m 3465 1890 l gs col0 s gr
+% Polyline
+n 5850 3105 m 3870 1890 l gs col0 s gr
+% Polyline
+n 6345 3105 m 6525 1890 l gs col0 s gr
+% Polyline
+n 6795 3105 m 5625 1890 l gs col0 s gr
+% Polyline
+n 7290 3105 m 5175 1890 l gs col0 s gr
+% Polyline
+n 4320 3105 m 5535 1890 l gs col0 s gr
+/Times-Roman ff 180.00 scf sf
+3285 1035 m
+gs 1 -1 sc (0) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+3735 1035 m
+gs 1 -1 sc (1) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+4230 1035 m
+gs 1 -1 sc (2) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+4680 1035 m
+gs 1 -1 sc (3) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+5085 1035 m
+gs 1 -1 sc (4) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+5535 1035 m
+gs 1 -1 sc (5) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+5985 1035 m
+gs 1 -1 sc (6) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+6435 1035 m
+gs 1 -1 sc (7) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+5940 4320 m
+gs 1 -1 sc (local indices) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+5490 4635 m
+gs 1 -1 sc (local array in processor 1) col0 sh gr
+/Times-Roman ff 180.00 scf sf
+3825 720 m
+gs 1 -1 sc (global array with global indices) col0 sh gr
+/Times-Italic ff 180.00 scf sf
+2880 1665 m
+gs 1 -1 sc (a:) col0 sh gr
+/Times-Italic ff 180.00 scf sf
+2070 3555 m
+gs 1 -1 sc (a0:) col0 sh gr
+/Times-Italic ff 180.00 scf sf
+4995 3555 m
+gs 1 -1 sc (a1:) col0 sh gr
+$F2psEnd
+rs
--- /dev/null
+%!PS-Adobe-3.0 EPSF-3.0
+%%Creator: inkscape 0.44.1
+%%Pages: 1
+%%Orientation: Portrait
+%%BoundingBox: -5 414 276 601
+%%HiResBoundingBox: -5.9999911 414 276 600.4
+%%DocumentMedia: plain 596 842 0 () ()
+%%EndComments
+%%Page: 1 1
+0 842 translate
+0.8 -0.8 scale
+gsave [1 0 0 1 0 0] concat
+gsave
+0 0 0 setrgbcolor
+newpath
+11.34082 347.36218 moveto
+10.730139 348.41037 10.276689 349.44715 9.9804688 350.47253 curveto
+9.6842421 351.49793 9.5361303 352.53699 9.5361328 353.58972 curveto
+9.5361303 354.64246 9.6842421 355.68608 9.9804688 356.72058 curveto
+10.281247 357.75053 10.734697 358.78731 11.34082 359.83093 curveto
+10.24707 359.83093 lineto
+9.563474 358.75997 9.0507792 357.70723 8.7089844 356.67273 curveto
+8.3717434 355.63823 8.2031238 354.61056 8.203125 353.58972 curveto
+8.2031238 352.57345 8.3717434 351.55034 8.7089844 350.52039 curveto
+9.0462219 349.49045 9.5589167 348.43771 10.24707 347.36218 curveto
+11.34082 347.36218 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+15.155273 356.82312 moveto
+19.974609 356.82312 lineto
+19.974609 357.98523 lineto
+13.494141 357.98523 lineto
+13.494141 356.82312 lineto
+14.018228 356.2808 14.731443 355.55392 15.633789 354.64246 curveto
+16.540686 353.72644 17.110347 353.13628 17.342773 352.87195 curveto
+17.784825 352.37521 18.092442 351.95594 18.265625 351.61414 curveto
+18.443353 351.26779 18.53222 350.92827 18.532227 350.59558 curveto
+18.53222 350.05327 18.340814 349.61121 17.958008 349.26941 curveto
+17.579747 348.92762 17.085282 348.75672 16.474609 348.75671 curveto
+16.041663 348.75672 15.583656 348.83192 15.100586 348.9823 curveto
+14.622068 349.1327 14.109373 349.36056 13.5625 349.66589 curveto
+13.5625 348.27136 lineto
+14.118488 348.04806 14.638019 347.87945 15.121094 347.7655 curveto
+15.604164 347.65158 16.04622 347.59461 16.447266 347.5946 curveto
+17.504552 347.59461 18.34765 347.85894 18.976562 348.38757 curveto
+19.605462 348.91623 19.919914 349.62261 19.919922 350.50671 curveto
+19.919914 350.92599 19.840162 351.32475 19.680664 351.703 curveto
+19.525709 352.07671 19.240879 352.51876 18.826172 353.02917 curveto
+18.712233 353.16134 18.349929 353.54415 17.739258 354.17761 curveto
+17.128576 354.80652 16.267249 355.68836 15.155273 356.82312 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+23.023438 356.2489 moveto
+24.46582 356.2489 lineto
+24.46582 357.42468 lineto
+23.344727 359.61218 lineto
+22.462891 359.61218 lineto
+23.023438 357.42468 lineto
+23.023438 356.2489 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+28.526367 356.82312 moveto
+33.345703 356.82312 lineto
+33.345703 357.98523 lineto
+26.865234 357.98523 lineto
+26.865234 356.82312 lineto
+27.389321 356.2808 28.102537 355.55392 29.004883 354.64246 curveto
+29.91178 353.72644 30.481441 353.13628 30.713867 352.87195 curveto
+31.155919 352.37521 31.463536 351.95594 31.636719 351.61414 curveto
+31.814447 351.26779 31.903314 350.92827 31.90332 350.59558 curveto
+31.903314 350.05327 31.711908 349.61121 31.329102 349.26941 curveto
+30.950841 348.92762 30.456376 348.75672 29.845703 348.75671 curveto
+29.412757 348.75672 28.954749 348.83192 28.47168 348.9823 curveto
+27.993162 349.1327 27.480467 349.36056 26.933594 349.66589 curveto
+26.933594 348.27136 lineto
+27.489582 348.04806 28.009112 347.87945 28.492188 347.7655 curveto
+28.975257 347.65158 29.417314 347.59461 29.818359 347.5946 curveto
+30.875646 347.59461 31.718744 347.85894 32.347656 348.38757 curveto
+32.976555 348.91623 33.291008 349.62261 33.291016 350.50671 curveto
+33.291008 350.92599 33.211256 351.32475 33.051758 351.703 curveto
+32.896803 352.07671 32.611972 352.51876 32.197266 353.02917 curveto
+32.083327 353.16134 31.721023 353.54415 31.110352 354.17761 curveto
+30.49967 354.80652 29.638343 355.68836 28.526367 356.82312 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+36.394531 356.2489 moveto
+37.836914 356.2489 lineto
+37.836914 357.42468 lineto
+36.71582 359.61218 lineto
+35.833984 359.61218 lineto
+36.394531 357.42468 lineto
+36.394531 356.2489 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+43.49707 351.21082 moveto
+42.822588 351.21082 42.289385 351.47514 41.897461 352.00378 curveto
+41.505532 352.52788 41.309568 353.24793 41.30957 354.16394 curveto
+41.309568 355.07996 41.503253 355.80229 41.890625 356.33093 curveto
+42.282549 356.85502 42.81803 357.11707 43.49707 357.11707 curveto
+44.166987 357.11707 44.697911 356.85274 45.089844 356.3241 curveto
+45.481765 355.79545 45.677728 355.0754 45.677734 354.16394 curveto
+45.677728 353.25704 45.481765 352.53927 45.089844 352.01062 curveto
+44.697911 351.47742 44.166987 351.21082 43.49707 351.21082 curveto
+43.49707 350.14441 moveto
+44.590815 350.14442 45.449864 350.49989 46.074219 351.21082 curveto
+46.69856 351.92176 47.010734 352.90613 47.010742 354.16394 curveto
+47.010734 355.4172 46.69856 356.40157 46.074219 357.11707 curveto
+45.449864 357.828 44.590815 358.18347 43.49707 358.18347 curveto
+42.39876 358.18347 41.537433 357.828 40.913086 357.11707 curveto
+40.293293 356.40157 39.983398 355.4172 39.983398 354.16394 curveto
+39.983398 352.90613 40.293293 351.92176 40.913086 351.21082 curveto
+41.537433 350.49989 42.39876 350.14442 43.49707 350.14441 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+48.890625 347.36218 moveto
+49.984375 347.36218 lineto
+50.667966 348.43771 51.178382 349.49045 51.515625 350.52039 curveto
+51.857418 351.55034 52.028316 352.57345 52.02832 353.58972 curveto
+52.028316 354.61056 51.857418 355.63823 51.515625 356.67273 curveto
+51.178382 357.70723 50.667966 358.75997 49.984375 359.83093 curveto
+48.890625 359.83093 lineto
+49.496743 358.78731 49.947914 357.75053 50.244141 356.72058 curveto
+50.544919 355.68608 50.69531 354.64246 50.695312 353.58972 curveto
+50.69531 352.53699 50.544919 351.49793 50.244141 350.47253 curveto
+49.947914 349.44715 49.496743 348.41037 48.890625 347.36218 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+32.34082 327.36218 moveto
+31.730139 328.41037 31.276689 329.44715 30.980469 330.47253 curveto
+30.684242 331.49793 30.53613 332.53699 30.536133 333.58972 curveto
+30.53613 334.64246 30.684242 335.68608 30.980469 336.72058 curveto
+31.281247 337.75053 31.734697 338.78731 32.34082 339.83093 curveto
+31.24707 339.83093 lineto
+30.563474 338.75997 30.050779 337.70723 29.708984 336.67273 curveto
+29.371743 335.63823 29.203124 334.61056 29.203125 333.58972 curveto
+29.203124 332.57345 29.371743 331.55034 29.708984 330.52039 curveto
+30.046222 329.49045 30.558917 328.43771 31.24707 327.36218 curveto
+32.34082 327.36218 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+37.918945 328.68835 moveto
+37.208004 328.68836 36.672523 329.03927 36.3125 329.74109 curveto
+35.957029 330.43836 35.779295 331.48882 35.779297 332.89246 curveto
+35.779295 334.29155 35.957029 335.342 36.3125 336.04382 curveto
+36.672523 336.74109 37.208004 337.08972 37.918945 337.08972 curveto
+38.634435 337.08972 39.169916 336.74109 39.525391 336.04382 curveto
+39.88541 335.342 40.065423 334.29155 40.06543 332.89246 curveto
+40.065423 331.48882 39.88541 330.43836 39.525391 329.74109 curveto
+39.169916 329.03927 38.634435 328.68836 37.918945 328.68835 curveto
+37.918945 327.5946 moveto
+39.06282 327.59461 39.93554 328.04806 40.537109 328.95496 curveto
+41.143221 329.85731 41.446281 331.16981 41.446289 332.89246 curveto
+41.446281 334.61056 41.143221 335.92306 40.537109 336.82996 curveto
+39.93554 337.7323 39.06282 338.18347 37.918945 338.18347 curveto
+36.775062 338.18347 35.900063 337.7323 35.293945 336.82996 curveto
+34.692382 335.92306 34.391601 334.61056 34.391602 332.89246 curveto
+34.391601 331.16981 34.692382 329.85731 35.293945 328.95496 curveto
+35.900063 328.04806 36.775062 327.59461 37.918945 327.5946 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+44.023438 336.2489 moveto
+45.46582 336.2489 lineto
+45.46582 337.42468 lineto
+44.344727 339.61218 lineto
+43.462891 339.61218 lineto
+44.023438 337.42468 lineto
+44.023438 336.2489 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+51.290039 328.68835 moveto
+50.579098 328.68836 50.043617 329.03927 49.683594 329.74109 curveto
+49.328123 330.43836 49.150388 331.48882 49.150391 332.89246 curveto
+49.150388 334.29155 49.328123 335.342 49.683594 336.04382 curveto
+50.043617 336.74109 50.579098 337.08972 51.290039 337.08972 curveto
+52.005529 337.08972 52.54101 336.74109 52.896484 336.04382 curveto
+53.256504 335.342 53.436517 334.29155 53.436523 332.89246 curveto
+53.436517 331.48882 53.256504 330.43836 52.896484 329.74109 curveto
+52.54101 329.03927 52.005529 328.68836 51.290039 328.68835 curveto
+51.290039 327.5946 moveto
+52.433914 327.59461 53.306634 328.04806 53.908203 328.95496 curveto
+54.514315 329.85731 54.817375 331.16981 54.817383 332.89246 curveto
+54.817375 334.61056 54.514315 335.92306 53.908203 336.82996 curveto
+53.306634 337.7323 52.433914 338.18347 51.290039 338.18347 curveto
+50.146156 338.18347 49.271156 337.7323 48.665039 336.82996 curveto
+48.063475 335.92306 47.762694 334.61056 47.762695 332.89246 curveto
+47.762694 331.16981 48.063475 329.85731 48.665039 328.95496 curveto
+49.271156 328.04806 50.146156 327.59461 51.290039 327.5946 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+57.394531 336.2489 moveto
+58.836914 336.2489 lineto
+58.836914 337.42468 lineto
+57.71582 339.61218 lineto
+56.833984 339.61218 lineto
+57.394531 337.42468 lineto
+57.394531 336.2489 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+64.49707 331.21082 moveto
+63.822588 331.21082 63.289385 331.47514 62.897461 332.00378 curveto
+62.505532 332.52788 62.309568 333.24793 62.30957 334.16394 curveto
+62.309568 335.07996 62.503253 335.80229 62.890625 336.33093 curveto
+63.282549 336.85502 63.81803 337.11707 64.49707 337.11707 curveto
+65.166987 337.11707 65.697911 336.85274 66.089844 336.3241 curveto
+66.481765 335.79545 66.677728 335.0754 66.677734 334.16394 curveto
+66.677728 333.25704 66.481765 332.53927 66.089844 332.01062 curveto
+65.697911 331.47742 65.166987 331.21082 64.49707 331.21082 curveto
+64.49707 330.14441 moveto
+65.590815 330.14442 66.449864 330.49989 67.074219 331.21082 curveto
+67.69856 331.92176 68.010734 332.90613 68.010742 334.16394 curveto
+68.010734 335.4172 67.69856 336.40157 67.074219 337.11707 curveto
+66.449864 337.828 65.590815 338.18347 64.49707 338.18347 curveto
+63.39876 338.18347 62.537433 337.828 61.913086 337.11707 curveto
+61.293293 336.40157 60.983398 335.4172 60.983398 334.16394 curveto
+60.983398 332.90613 61.293293 331.92176 61.913086 331.21082 curveto
+62.537433 330.49989 63.39876 330.14442 64.49707 330.14441 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+69.890625 327.36218 moveto
+70.984375 327.36218 lineto
+71.667966 328.43771 72.178382 329.49045 72.515625 330.52039 curveto
+72.857418 331.55034 73.028316 332.57345 73.02832 333.58972 curveto
+73.028316 334.61056 72.857418 335.63823 72.515625 336.67273 curveto
+72.178382 337.70723 71.667966 338.75997 70.984375 339.83093 curveto
+69.890625 339.83093 lineto
+70.496743 338.78731 70.947914 337.75053 71.244141 336.72058 curveto
+71.544919 335.68608 71.69531 334.64246 71.695312 333.58972 curveto
+71.69531 332.53699 71.544919 331.49793 71.244141 330.47253 curveto
+70.947914 329.44715 70.496743 328.41037 69.890625 327.36218 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+77.683594 328.36218 moveto
+77.072913 329.41037 76.619463 330.44715 76.323242 331.47253 curveto
+76.027016 332.49793 75.878904 333.53699 75.878906 334.58972 curveto
+75.878904 335.64246 76.027016 336.68608 76.323242 337.72058 curveto
+76.62402 338.75053 77.07747 339.78731 77.683594 340.83093 curveto
+76.589844 340.83093 lineto
+75.906247 339.75997 75.393553 338.70723 75.051758 337.67273 curveto
+74.714517 336.63823 74.545897 335.61056 74.545898 334.58972 curveto
+74.545897 333.57345 74.714517 332.55034 75.051758 331.52039 curveto
+75.388995 330.49045 75.90169 329.43771 76.589844 328.36218 curveto
+77.683594 328.36218 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+80.547852 337.82312 moveto
+82.803711 337.82312 lineto
+82.803711 330.03699 lineto
+80.349609 330.52917 lineto
+80.349609 329.27136 lineto
+82.790039 328.77917 lineto
+84.170898 328.77917 lineto
+84.170898 337.82312 lineto
+86.426758 337.82312 lineto
+86.426758 338.98523 lineto
+80.547852 338.98523 lineto
+80.547852 337.82312 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+89.366211 337.2489 moveto
+90.808594 337.2489 lineto
+90.808594 338.42468 lineto
+89.6875 340.61218 lineto
+88.805664 340.61218 lineto
+89.366211 338.42468 lineto
+89.366211 337.2489 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+93.918945 337.82312 moveto
+96.174805 337.82312 lineto
+96.174805 330.03699 lineto
+93.720703 330.52917 lineto
+93.720703 329.27136 lineto
+96.161133 328.77917 lineto
+97.541992 328.77917 lineto
+97.541992 337.82312 lineto
+99.797852 337.82312 lineto
+99.797852 338.98523 lineto
+93.918945 338.98523 lineto
+93.918945 337.82312 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+102.7373 337.2489 moveto
+104.17969 337.2489 lineto
+104.17969 338.42468 lineto
+103.05859 340.61218 lineto
+102.17676 340.61218 lineto
+102.7373 338.42468 lineto
+102.7373 337.2489 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+109.83984 332.21082 moveto
+109.16536 332.21082 108.63216 332.47514 108.24023 333.00378 curveto
+107.8483 333.52788 107.65234 334.24793 107.65234 335.16394 curveto
+107.65234 336.07996 107.84603 336.80229 108.2334 337.33093 curveto
+108.62532 337.85502 109.1608 338.11707 109.83984 338.11707 curveto
+110.50976 338.11707 111.04068 337.85274 111.43262 337.3241 curveto
+111.82454 336.79545 112.0205 336.0754 112.02051 335.16394 curveto
+112.0205 334.25704 111.82454 333.53927 111.43262 333.01062 curveto
+111.04068 332.47742 110.50976 332.21082 109.83984 332.21082 curveto
+109.83984 331.14441 moveto
+110.93359 331.14442 111.79264 331.49989 112.41699 332.21082 curveto
+113.04133 332.92176 113.35351 333.90613 113.35352 335.16394 curveto
+113.35351 336.4172 113.04133 337.40157 112.41699 338.11707 curveto
+111.79264 338.828 110.93359 339.18347 109.83984 339.18347 curveto
+108.74153 339.18347 107.88021 338.828 107.25586 338.11707 curveto
+106.63607 337.40157 106.32617 336.4172 106.32617 335.16394 curveto
+106.32617 333.90613 106.63607 332.92176 107.25586 332.21082 curveto
+107.88021 331.49989 108.74153 331.14442 109.83984 331.14441 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+115.2334 328.36218 moveto
+116.32715 328.36218 lineto
+117.01074 329.43771 117.52116 330.49045 117.8584 331.52039 curveto
+118.20019 332.55034 118.37109 333.57345 118.37109 334.58972 curveto
+118.37109 335.61056 118.20019 336.63823 117.8584 337.67273 curveto
+117.52116 338.70723 117.01074 339.75997 116.32715 340.83093 curveto
+115.2334 340.83093 lineto
+115.83952 339.78731 116.29069 338.75053 116.58691 337.72058 curveto
+116.88769 336.68608 117.03808 335.64246 117.03809 334.58972 curveto
+117.03808 333.53699 116.88769 332.49793 116.58691 331.47253 curveto
+116.29069 330.44715 115.83952 329.41037 115.2334 328.36218 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+56.84082 347.36218 moveto
+56.230139 348.41037 55.776689 349.44715 55.480469 350.47253 curveto
+55.184242 351.49793 55.03613 352.53699 55.036133 353.58972 curveto
+55.03613 354.64246 55.184242 355.68608 55.480469 356.72058 curveto
+55.781247 357.75053 56.234697 358.78731 56.84082 359.83093 curveto
+55.74707 359.83093 lineto
+55.063474 358.75997 54.550779 357.70723 54.208984 356.67273 curveto
+53.871743 355.63823 53.703124 354.61056 53.703125 353.58972 curveto
+53.703124 352.57345 53.871743 351.55034 54.208984 350.52039 curveto
+54.546222 349.49045 55.058917 348.43771 55.74707 347.36218 curveto
+56.84082 347.36218 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+63.649414 352.4823 moveto
+64.310215 352.62358 64.825188 352.91753 65.194336 353.36414 curveto
+65.568026 353.81075 65.754875 354.36219 65.754883 355.01843 curveto
+65.754875 356.0256 65.408521 356.80489 64.71582 357.35632 curveto
+64.023106 357.90776 63.038732 358.18347 61.762695 358.18347 curveto
+61.334307 358.18347 60.89225 358.14018 60.436523 358.05359 curveto
+59.98535 357.97156 59.518228 357.84623 59.035156 357.67761 curveto
+59.035156 356.3446 lineto
+59.417967 356.56791 59.837238 356.73653 60.292969 356.85046 curveto
+60.748695 356.9644 61.224932 357.02136 61.72168 357.02136 curveto
+62.58756 357.02136 63.246088 356.85047 63.697266 356.50867 curveto
+64.152989 356.16687 64.380853 355.67013 64.380859 355.01843 curveto
+64.380853 354.41687 64.168939 353.94747 63.745117 353.61023 curveto
+63.325841 353.26844 62.74023 353.09754 61.988281 353.09753 curveto
+60.798828 353.09753 lineto
+60.798828 351.96277 lineto
+62.042969 351.96277 lineto
+62.722 351.96277 63.241531 351.82833 63.601562 351.55945 curveto
+63.961583 351.28602 64.141595 350.89409 64.141602 350.38367 curveto
+64.141595 349.85959 63.954747 349.45855 63.581055 349.18054 curveto
+63.211909 348.898 62.680985 348.75672 61.988281 348.75671 curveto
+61.610022 348.75672 61.204424 348.79774 60.771484 348.87976 curveto
+60.338539 348.9618 59.862303 349.08941 59.342773 349.26257 curveto
+59.342773 348.0321 lineto
+59.86686 347.88628 60.356768 347.77691 60.8125 347.70398 curveto
+61.272783 347.63107 61.705725 347.59461 62.111328 347.5946 curveto
+63.1595 347.59461 63.988926 347.83387 64.599609 348.31238 curveto
+65.210279 348.78635 65.515617 349.42892 65.515625 350.24011 curveto
+65.515617 350.80522 65.353834 351.28374 65.030273 351.67566 curveto
+64.706699 352.06303 64.246413 352.33191 63.649414 352.4823 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+68.523438 356.2489 moveto
+69.96582 356.2489 lineto
+69.96582 357.42468 lineto
+68.844727 359.61218 lineto
+67.962891 359.61218 lineto
+68.523438 357.42468 lineto
+68.523438 356.2489 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+77.020508 352.4823 moveto
+77.681309 352.62358 78.196282 352.91753 78.56543 353.36414 curveto
+78.93912 353.81075 79.125969 354.36219 79.125977 355.01843 curveto
+79.125969 356.0256 78.779615 356.80489 78.086914 357.35632 curveto
+77.3942 357.90776 76.409826 358.18347 75.133789 358.18347 curveto
+74.7054 358.18347 74.263343 358.14018 73.807617 358.05359 curveto
+73.356443 357.97156 72.889321 357.84623 72.40625 357.67761 curveto
+72.40625 356.3446 lineto
+72.789061 356.56791 73.208331 356.73653 73.664062 356.85046 curveto
+74.119789 356.9644 74.596025 357.02136 75.092773 357.02136 curveto
+75.958654 357.02136 76.617182 356.85047 77.068359 356.50867 curveto
+77.524082 356.16687 77.751947 355.67013 77.751953 355.01843 curveto
+77.751947 354.41687 77.540033 353.94747 77.116211 353.61023 curveto
+76.696935 353.26844 76.111323 353.09754 75.359375 353.09753 curveto
+74.169922 353.09753 lineto
+74.169922 351.96277 lineto
+75.414062 351.96277 lineto
+76.093094 351.96277 76.612625 351.82833 76.972656 351.55945 curveto
+77.332676 351.28602 77.512689 350.89409 77.512695 350.38367 curveto
+77.512689 349.85959 77.32584 349.45855 76.952148 349.18054 curveto
+76.583003 348.898 76.052079 348.75672 75.359375 348.75671 curveto
+74.981116 348.75672 74.575518 348.79774 74.142578 348.87976 curveto
+73.709633 348.9618 73.233397 349.08941 72.713867 349.26257 curveto
+72.713867 348.0321 lineto
+73.237954 347.88628 73.727862 347.77691 74.183594 347.70398 curveto
+74.643877 347.63107 75.076819 347.59461 75.482422 347.5946 curveto
+76.530594 347.59461 77.36002 347.83387 77.970703 348.31238 curveto
+78.581373 348.78635 78.886711 349.42892 78.886719 350.24011 curveto
+78.886711 350.80522 78.724928 351.28374 78.401367 351.67566 curveto
+78.077793 352.06303 77.617507 352.33191 77.020508 352.4823 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+81.894531 356.2489 moveto
+83.336914 356.2489 lineto
+83.336914 357.42468 lineto
+82.21582 359.61218 lineto
+81.333984 359.61218 lineto
+81.894531 357.42468 lineto
+81.894531 356.2489 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+88.99707 351.21082 moveto
+88.322588 351.21082 87.789385 351.47514 87.397461 352.00378 curveto
+87.005532 352.52788 86.809568 353.24793 86.80957 354.16394 curveto
+86.809568 355.07996 87.003253 355.80229 87.390625 356.33093 curveto
+87.782549 356.85502 88.31803 357.11707 88.99707 357.11707 curveto
+89.666987 357.11707 90.197911 356.85274 90.589844 356.3241 curveto
+90.981765 355.79545 91.177728 355.0754 91.177734 354.16394 curveto
+91.177728 353.25704 90.981765 352.53927 90.589844 352.01062 curveto
+90.197911 351.47742 89.666987 351.21082 88.99707 351.21082 curveto
+88.99707 350.14441 moveto
+90.090815 350.14442 90.949864 350.49989 91.574219 351.21082 curveto
+92.19856 351.92176 92.510734 352.90613 92.510742 354.16394 curveto
+92.510734 355.4172 92.19856 356.40157 91.574219 357.11707 curveto
+90.949864 357.828 90.090815 358.18347 88.99707 358.18347 curveto
+87.89876 358.18347 87.037433 357.828 86.413086 357.11707 curveto
+85.793293 356.40157 85.483398 355.4172 85.483398 354.16394 curveto
+85.483398 352.90613 85.793293 351.92176 86.413086 351.21082 curveto
+87.037433 350.49989 87.89876 350.14442 88.99707 350.14441 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+94.390625 347.36218 moveto
+95.484375 347.36218 lineto
+96.167966 348.43771 96.678382 349.49045 97.015625 350.52039 curveto
+97.357418 351.55034 97.528316 352.57345 97.52832 353.58972 curveto
+97.528316 354.61056 97.357418 355.63823 97.015625 356.67273 curveto
+96.678382 357.70723 96.167966 358.75997 95.484375 359.83093 curveto
+94.390625 359.83093 lineto
+% [Embedded EPS vector data omitted: PostScript path commands (gsave/newpath/moveto/curveto/lineto/fill/stroke) drawing text-label glyph outlines and an ellipse outline for this figure.]
+92.467769 482.53014 92.638668 483.55325 92.638672 484.56952 curveto
+92.638668 485.59036 92.467769 486.61802 92.125977 487.65253 curveto
+91.788734 488.68703 91.278317 489.73977 90.594727 490.81073 curveto
+89.500977 490.81073 lineto
+90.107095 489.76711 90.558266 488.73033 90.854492 487.70038 curveto
+91.155271 486.66588 91.305661 485.62226 91.305664 484.56952 curveto
+91.305661 483.51679 91.155271 482.47773 90.854492 481.45233 curveto
+90.558266 480.42695 90.107095 479.39017 89.500977 478.34198 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+96.905273 479.30878 moveto
+96.294593 480.35696 95.841143 481.39375 95.544922 482.41913 curveto
+95.248695 483.44453 95.100583 484.48359 95.100586 485.53632 curveto
+95.100583 486.58905 95.248695 487.63267 95.544922 488.66718 curveto
+95.8457 489.69712 96.29915 490.73391 96.905273 491.77753 curveto
+95.811523 491.77753 lineto
+95.127927 490.70656 94.615232 489.65383 94.273438 488.61932 curveto
+93.936197 487.58482 93.767577 486.55715 93.767578 485.53632 curveto
+93.767577 484.52005 93.936197 483.49693 94.273438 482.46698 curveto
+94.610675 481.43704 95.12337 480.38431 95.811523 479.30878 curveto
+96.905273 479.30878 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+99.571289 489.71991 moveto
+99.571289 488.4621 lineto
+99.917641 488.62616 100.26855 488.75149 100.62402 488.83807 curveto
+100.97949 488.92466 101.32812 488.96796 101.66992 488.96796 curveto
+102.58138 488.96796 103.27636 488.66262 103.75488 488.05194 curveto
+104.23795 487.43671 104.51367 486.50474 104.58203 485.25604 curveto
+104.3177 485.64797 103.98274 485.94875 103.57715 486.15839 curveto
+103.17154 486.36803 102.72265 486.47284 102.23047 486.47284 curveto
+101.20963 486.47284 100.40071 486.16523 99.803711 485.54999 curveto
+99.211262 484.9302 98.915038 484.08482 98.915039 483.01385 curveto
+98.915038 481.96569 99.224934 481.12487 99.844727 480.49139 curveto
+100.46452 479.85794 101.28938 479.54121 102.31934 479.5412 curveto
+103.49967 479.54121 104.39973 479.99466 105.01953 480.90155 curveto
+105.64387 481.8039 105.95605 483.1164 105.95605 484.83905 curveto
+105.95605 486.44778 105.57323 487.73293 104.80762 488.69452 curveto
+104.04654 489.65155 103.02115 490.13007 101.73145 490.13007 curveto
+101.38509 490.13007 101.03418 490.09589 100.67871 490.02753 curveto
+100.32324 489.95917 99.9541 489.85663 99.571289 489.71991 curveto
+102.31934 485.39276 moveto
+102.93912 485.39277 103.42903 485.18085 103.78906 484.75702 curveto
+104.15364 484.3332 104.33593 483.75214 104.33594 483.01385 curveto
+104.33593 482.28014 104.15364 481.70136 103.78906 481.27753 curveto
+103.42903 480.84915 102.93912 480.63496 102.31934 480.63495 curveto
+101.69954 480.63496 101.20735 480.84915 100.84277 481.27753 curveto
+100.48274 481.70136 100.30273 482.28014 100.30273 483.01385 curveto
+100.30273 483.75214 100.48274 484.3332 100.84277 484.75702 curveto
+101.20735 485.18085 101.69954 485.39277 102.31934 485.39276 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+108.58789 488.1955 moveto
+110.03027 488.1955 lineto
+110.03027 489.37128 lineto
+108.90918 491.55878 lineto
+108.02734 491.55878 lineto
+108.58789 489.37128 lineto
+108.58789 488.1955 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+115.85449 485.08514 moveto
+115.19824 485.08515 114.68099 485.2606 114.30273 485.61151 curveto
+113.92903 485.96243 113.74219 486.4455 113.74219 487.06073 curveto
+113.74219 487.67597 113.92903 488.15904 114.30273 488.50995 curveto
+114.68099 488.86086 115.19824 489.03632 115.85449 489.03632 curveto
+116.51074 489.03632 117.02799 488.86086 117.40625 488.50995 curveto
+117.7845 488.15448 117.97363 487.67141 117.97363 487.06073 curveto
+117.97363 486.4455 117.7845 485.96243 117.40625 485.61151 curveto
+117.03255 485.2606 116.51529 485.08515 115.85449 485.08514 curveto
+114.47363 484.49725 moveto
+113.88118 484.35143 113.41862 484.07571 113.08594 483.6701 curveto
+112.75781 483.26451 112.59375 482.77005 112.59375 482.18671 curveto
+112.59375 481.37096 112.88314 480.7261 113.46191 480.25214 curveto
+114.04524 479.77819 114.84277 479.54121 115.85449 479.5412 curveto
+116.87076 479.54121 117.66829 479.77819 118.24707 480.25214 curveto
+118.82584 480.7261 119.11523 481.37096 119.11523 482.18671 curveto
+119.11523 482.77005 118.94889 483.26451 118.61621 483.6701 curveto
+118.28808 484.07571 117.83007 484.35143 117.24219 484.49725 curveto
+117.90755 484.65221 118.4248 484.95527 118.79395 485.40643 curveto
+119.16764 485.85761 119.35448 486.40904 119.35449 487.06073 curveto
+119.35448 488.04966 119.05142 488.80845 118.44531 489.3371 curveto
+117.84374 489.86574 116.98014 490.13007 115.85449 490.13007 curveto
+114.72884 490.13007 113.86295 489.86574 113.25684 489.3371 curveto
+112.65527 488.80845 112.35449 488.04966 112.35449 487.06073 curveto
+112.35449 486.40904 112.54134 485.85761 112.91504 485.40643 curveto
+113.28874 484.95527 113.80827 484.65221 114.47363 484.49725 curveto
+113.96777 482.31659 moveto
+113.96777 482.84524 114.13183 483.25768 114.45996 483.55389 curveto
+114.79264 483.85012 115.25748 483.99824 115.85449 483.99823 curveto
+116.44694 483.99824 116.9095 483.85012 117.24219 483.55389 curveto
+117.57942 483.25768 117.74804 482.84524 117.74805 482.31659 curveto
+117.74804 481.78795 117.57942 481.37552 117.24219 481.07928 curveto
+116.9095 480.78307 116.44694 480.63496 115.85449 480.63495 curveto
+115.25748 480.63496 114.79264 480.78307 114.45996 481.07928 curveto
+114.13183 481.37552 113.96777 481.78795 113.96777 482.31659 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+121.95898 488.1955 moveto
+123.40137 488.1955 lineto
+123.40137 489.37128 lineto
+122.28027 491.55878 lineto
+121.39844 491.55878 lineto
+121.95898 489.37128 lineto
+121.95898 488.1955 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+131.13281 486.01483 moveto
+131.13281 485.10338 130.94368 484.397 130.56543 483.89569 curveto
+130.19173 483.3944 129.66536 483.14374 128.98633 483.14374 curveto
+128.31185 483.14374 127.78548 483.3944 127.40723 483.89569 curveto
+127.03353 484.397 126.84668 485.10338 126.84668 486.01483 curveto
+126.84668 486.92174 127.03353 487.62584 127.40723 488.12714 curveto
+127.78548 488.62844 128.31185 488.87909 128.98633 488.87909 curveto
+129.66536 488.87909 130.19173 488.62844 130.56543 488.12714 curveto
+130.94368 487.62584 131.13281 486.92174 131.13281 486.01483 curveto
+132.39062 488.98163 moveto
+132.39062 490.28501 132.10123 491.25344 131.52246 491.8869 curveto
+130.94368 492.52492 130.05729 492.84393 128.86328 492.84393 curveto
+128.42122 492.84393 128.00423 492.80975 127.6123 492.74139 curveto
+127.22038 492.67759 126.83984 492.57733 126.4707 492.44061 curveto
+126.4707 491.21698 lineto
+126.83984 491.4175 127.20442 491.56561 127.56445 491.66132 curveto
+127.92448 491.75702 128.29134 491.80487 128.66504 491.80487 curveto
+129.4899 491.80487 130.10742 491.5884 130.51758 491.15546 curveto
+130.92773 490.72707 131.13281 490.07766 131.13281 489.20721 curveto
+131.13281 488.58514 lineto
+130.87304 489.03632 130.54036 489.37356 130.13477 489.59686 curveto
+129.72916 489.82017 129.24381 489.93182 128.67871 489.93182 curveto
+127.73991 489.93182 126.9834 489.57408 126.40918 488.85858 curveto
+125.83496 488.14309 125.54785 487.19517 125.54785 486.01483 curveto
+125.54785 484.82994 125.83496 483.87975 126.40918 483.16425 curveto
+126.9834 482.44876 127.73991 482.09101 128.67871 482.091 curveto
+129.24381 482.09101 129.72916 482.20266 130.13477 482.42596 curveto
+130.54036 482.64928 130.87304 482.98652 131.13281 483.43768 curveto
+131.13281 482.27557 lineto
+132.39062 482.27557 lineto
+132.39062 488.98163 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+134.7832 479.30878 moveto
+135.87695 479.30878 lineto
+136.56054 480.38431 137.07096 481.43704 137.4082 482.46698 curveto
+137.75 483.49693 137.92089 484.52005 137.9209 485.53632 curveto
+137.92089 486.55715 137.75 487.58482 137.4082 488.61932 curveto
+137.07096 489.65383 136.56054 490.70656 135.87695 491.77753 curveto
+134.7832 491.77753 lineto
+135.38932 490.73391 135.84049 489.69712 136.13672 488.66718 curveto
+136.4375 487.63267 136.58789 486.58905 136.58789 485.53632 curveto
+136.58789 484.48359 136.4375 483.44453 136.13672 482.41913 curveto
+135.84049 481.39375 135.38932 480.35696 134.7832 479.30878 curveto
+fill
+grestore
+gsave [1.209899 0 0 0.857509 -38.19343 49.51799] concat
+0 0 0 setrgbcolor
+[] 0 setdash
+1.5 setlinewidth
+0 setlinejoin
+0 setlinecap
+newpath
+154.5 482.61218 moveto
+154.5 515.59418 125.716 542.36218 90.25 542.36218 curveto
+54.784 542.36218 26 515.59418 26 482.61218 curveto
+26 449.63018 54.784 422.86218 90.25 422.86218 curveto
+125.716 422.86218 154.5 449.63018 154.5 482.61218 curveto
+closepath
+stroke
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+67.874023 420.15613 moveto
+69.254883 420.15613 lineto
+69.254883 430.36218 lineto
+67.874023 430.36218 lineto
+67.874023 420.15613 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+73.192383 420.5321 moveto
+73.192383 422.70593 lineto
+75.783203 422.70593 lineto
+75.783203 423.68347 lineto
+73.192383 423.68347 lineto
+73.192383 427.83972 lineto
+73.19238 428.46407 73.27669 428.86511 73.445312 429.04285 curveto
+73.618487 429.22058 73.967119 429.30945 74.491211 429.30945 curveto
+75.783203 429.30945 lineto
+75.783203 430.36218 lineto
+74.491211 430.36218 lineto
+73.520505 430.36218 72.850584 430.18217 72.481445 429.82214 curveto
+72.112303 429.45756 71.927733 428.79675 71.927734 427.83972 curveto
+71.927734 423.68347 lineto
+71.004883 423.68347 lineto
+71.004883 422.70593 lineto
+71.927734 422.70593 lineto
+71.927734 420.5321 lineto
+73.192383 420.5321 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+59.254883 524.79089 moveto
+59.254883 528.62585 lineto
+60.991211 528.62585 lineto
+61.633784 528.62586 62.130528 528.45952 62.481445 528.12683 curveto
+62.83235 527.79415 63.007806 527.3202 63.007812 526.70496 curveto
+63.007806 526.09429 62.83235 525.62261 62.481445 525.28992 curveto
+62.130528 524.95724 61.633784 524.7909 60.991211 524.79089 curveto
+59.254883 524.79089 lineto
+57.874023 523.65613 moveto
+60.991211 523.65613 lineto
+62.135086 523.65614 62.998691 523.9159 63.582031 524.43542 curveto
+64.169914 524.95041 64.463859 525.70692 64.463867 526.70496 curveto
+64.463859 527.71212 64.169914 528.47319 63.582031 528.98816 curveto
+62.998691 529.50314 62.135086 529.76062 60.991211 529.76062 curveto
+59.254883 529.76062 lineto
+59.254883 533.86218 lineto
+57.874023 533.86218 lineto
+57.874023 523.65613 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+69.399414 524.56531 moveto
+68.688473 524.56532 68.152992 524.91623 67.792969 525.61804 curveto
+67.437498 526.31532 67.259763 527.36577 67.259766 528.76941 curveto
+67.259763 530.1685 67.437498 531.21896 67.792969 531.92078 curveto
+68.152992 532.61804 68.688473 532.96668 69.399414 532.96667 curveto
+70.114904 532.96668 70.650385 532.61804 71.005859 531.92078 curveto
+71.365879 531.21896 71.545892 530.1685 71.545898 528.76941 curveto
+71.545892 527.36577 71.365879 526.31532 71.005859 525.61804 curveto
+70.650385 524.91623 70.114904 524.56532 69.399414 524.56531 curveto
+69.399414 523.47156 moveto
+70.543289 523.47157 71.416009 523.92502 72.017578 524.83191 curveto
+72.62369 525.73426 72.92675 527.04676 72.926758 528.76941 curveto
+72.92675 530.48751 72.62369 531.80001 72.017578 532.70691 curveto
+71.416009 533.60925 70.543289 534.06042 69.399414 534.06042 curveto
+68.255531 534.06042 67.380531 533.60925 66.774414 532.70691 curveto
+66.17285 531.80001 65.872069 530.48751 65.87207 528.76941 curveto
+65.872069 527.04676 66.17285 525.73426 66.774414 524.83191 curveto
+67.380531 523.92502 68.255531 523.47157 69.399414 523.47156 curveto
+fill
+grestore
+gsave [1.063088 0 0 0.999533 -49.82231 5.914079] concat
+0 0 0 setrgbcolor
+[] 0 setdash
+1.5 setlinewidth
+0 setlinejoin
+0 setlinecap
+newpath
+349.5 351.36218 moveto
+349.5 378.68618 319.484 400.86218 282.5 400.86218 curveto
+245.516 400.86218 215.5 378.68618 215.5 351.36218 curveto
+215.5 324.03818 245.516 301.86218 282.5 301.86218 curveto
+319.484 301.86218 349.5 324.03818 349.5 351.36218 curveto
+closepath
+stroke
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+205.85547 329.61218 moveto
+205.24479 330.66037 204.79134 331.69715 204.49512 332.72253 curveto
+204.19889 333.74793 204.05078 334.78699 204.05078 335.83972 curveto
+204.05078 336.89246 204.19889 337.93608 204.49512 338.97058 curveto
+204.7959 340.00053 205.24935 341.03731 205.85547 342.08093 curveto
+204.76172 342.08093 lineto
+204.07812 341.00997 203.56543 339.95723 203.22363 338.92273 curveto
+202.88639 337.88823 202.71777 336.86056 202.71777 335.83972 curveto
+202.71777 334.82345 202.88639 333.80034 203.22363 332.77039 curveto
+203.56087 331.74045 204.07357 330.68771 204.76172 329.61218 curveto
+205.85547 329.61218 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+208.49414 330.02917 moveto
+213.91504 330.02917 lineto
+213.91504 331.19128 lineto
+209.75879 331.19128 lineto
+209.75879 333.69324 lineto
+209.95931 333.62488 210.15983 333.57475 210.36035 333.54285 curveto
+210.56087 333.5064 210.76139 333.48817 210.96191 333.48816 curveto
+212.10123 333.48817 213.00357 333.80034 213.66895 334.42468 curveto
+214.3343 335.04904 214.66698 335.89441 214.66699 336.96082 curveto
+214.66698 338.05912 214.32519 338.91362 213.6416 339.52429 curveto
+212.958 340.13041 211.99414 340.43347 210.75 340.43347 curveto
+210.32161 340.43347 209.88411 340.39701 209.4375 340.3241 curveto
+208.99544 340.25118 208.53743 340.14181 208.06348 339.99597 curveto
+208.06348 338.60828 lineto
+208.47363 338.83159 208.89746 338.99793 209.33496 339.1073 curveto
+209.77246 339.21668 210.23502 339.27136 210.72266 339.27136 curveto
+211.51106 339.27136 212.13541 339.06401 212.5957 338.64929 curveto
+213.05598 338.23458 213.28613 337.67176 213.28613 336.96082 curveto
+213.28613 336.24988 213.05598 335.68706 212.5957 335.27234 curveto
+212.13541 334.85763 211.51106 334.65027 210.72266 334.65027 curveto
+210.35351 334.65027 209.98437 334.69129 209.61523 334.77332 curveto
+209.25065 334.85535 208.87695 334.98296 208.49414 335.15613 curveto
+208.49414 330.02917 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+217.53809 338.4989 moveto
+218.98047 338.4989 lineto
+218.98047 339.67468 lineto
+217.85938 341.86218 lineto
+216.97754 341.86218 lineto
+217.53809 339.67468 lineto
+217.53809 338.4989 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+224.80469 330.93835 moveto
+224.09375 330.93836 223.55827 331.28927 223.19824 331.99109 curveto
+222.84277 332.68836 222.66504 333.73882 222.66504 335.14246 curveto
+222.66504 336.54155 222.84277 337.592 223.19824 338.29382 curveto
+223.55827 338.99109 224.09375 339.33972 224.80469 339.33972 curveto
+225.52018 339.33972 226.05566 338.99109 226.41113 338.29382 curveto
+226.77115 337.592 226.95117 336.54155 226.95117 335.14246 curveto
+226.95117 333.73882 226.77115 332.68836 226.41113 331.99109 curveto
+226.05566 331.28927 225.52018 330.93836 224.80469 330.93835 curveto
+224.80469 329.8446 moveto
+225.94856 329.84461 226.82128 330.29806 227.42285 331.20496 curveto
+228.02896 332.10731 228.33202 333.41981 228.33203 335.14246 curveto
+228.33202 336.86056 228.02896 338.17306 227.42285 339.07996 curveto
+226.82128 339.9823 225.94856 340.43347 224.80469 340.43347 curveto
+223.6608 340.43347 222.7858 339.9823 222.17969 339.07996 curveto
+221.57812 338.17306 221.27734 336.86056 221.27734 335.14246 curveto
+221.27734 333.41981 221.57812 332.10731 222.17969 331.20496 curveto
+222.7858 330.29806 223.6608 329.84461 224.80469 329.8446 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+230.90918 338.4989 moveto
+232.35156 338.4989 lineto
+232.35156 339.67468 lineto
+231.23047 341.86218 lineto
+230.34863 341.86218 lineto
+230.90918 339.67468 lineto
+230.90918 338.4989 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+240.08301 336.31824 moveto
+240.083 335.40678 239.89387 334.7004 239.51562 334.1991 curveto
+239.14192 333.6978 238.61556 333.44715 237.93652 333.44714 curveto
+237.26204 333.44715 236.73567 333.6978 236.35742 334.1991 curveto
+235.98372 334.7004 235.79687 335.40678 235.79688 336.31824 curveto
+235.79687 337.22514 235.98372 337.92924 236.35742 338.43054 curveto
+236.73567 338.93185 237.26204 339.1825 237.93652 339.1825 curveto
+238.61556 339.1825 239.14192 338.93185 239.51562 338.43054 curveto
+239.89387 337.92924 240.083 337.22514 240.08301 336.31824 curveto
+241.34082 339.28503 moveto
+241.34081 340.58842 241.05142 341.55684 240.47266 342.19031 curveto
+239.89387 342.82833 239.00748 343.14734 237.81348 343.14734 curveto
+237.37142 343.14734 236.95442 343.11316 236.5625 343.0448 curveto
+236.17057 342.98099 235.79004 342.88073 235.4209 342.74402 curveto
+235.4209 341.52039 lineto
+235.79004 341.72091 236.15462 341.86902 236.51465 341.96472 curveto
+236.87467 342.06042 237.24153 342.10827 237.61523 342.10828 curveto
+238.4401 342.10827 239.05761 341.8918 239.46777 341.45886 curveto
+239.87792 341.03048 240.083 340.38106 240.08301 339.51062 curveto
+240.08301 338.88855 lineto
+239.82324 339.33972 239.49055 339.67696 239.08496 339.90027 curveto
+238.67936 340.12358 238.19401 340.23523 237.62891 340.23523 curveto
+236.6901 340.23523 235.93359 339.87748 235.35938 339.16199 curveto
+234.78516 338.44649 234.49805 337.49858 234.49805 336.31824 curveto
+234.49805 335.13335 234.78516 334.18315 235.35938 333.46765 curveto
+235.93359 332.75216 236.6901 332.39442 237.62891 332.39441 curveto
+238.19401 332.39442 238.67936 332.50607 239.08496 332.72937 curveto
+239.49055 332.95268 239.82324 333.28992 240.08301 333.74109 curveto
+240.08301 332.57898 lineto
+241.34082 332.57898 lineto
+241.34082 339.28503 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+243.7334 329.61218 moveto
+244.82715 329.61218 lineto
+245.51074 330.68771 246.02116 331.74045 246.3584 332.77039 curveto
+246.70019 333.80034 246.87109 334.82345 246.87109 335.83972 curveto
+246.87109 336.86056 246.70019 337.88823 246.3584 338.92273 curveto
+246.02116 339.95723 245.51074 341.00997 244.82715 342.08093 curveto
+243.7334 342.08093 lineto
+244.33952 341.03731 244.79069 340.00053 245.08691 338.97058 curveto
+245.38769 337.93608 245.53808 336.89246 245.53809 335.83972 curveto
+245.53808 334.78699 245.38769 333.74793 245.08691 332.72253 curveto
+244.79069 331.69715 244.33952 330.66037 243.7334 329.61218 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+253.34082 328.61218 moveto
+252.73014 329.66037 252.27669 330.69715 251.98047 331.72253 curveto
+251.68424 332.74793 251.53613 333.78699 251.53613 334.83972 curveto
+251.53613 335.89246 251.68424 336.93608 251.98047 337.97058 curveto
+252.28125 339.00053 252.7347 340.03731 253.34082 341.08093 curveto
+252.24707 341.08093 lineto
+251.56347 340.00997 251.05078 338.95723 250.70898 337.92273 curveto
+250.37174 336.88823 250.20312 335.86056 250.20312 334.83972 curveto
+250.20312 333.82345 250.37174 332.80034 250.70898 331.77039 curveto
+251.04622 330.74045 251.55892 329.68771 252.24707 328.61218 curveto
+253.34082 328.61218 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+259.08984 333.58191 moveto
+258.47005 333.58191 257.97786 333.79383 257.61328 334.21765 curveto
+257.25325 334.64148 257.07324 335.22254 257.07324 335.96082 curveto
+257.07324 336.69454 257.25325 337.2756 257.61328 337.70398 curveto
+257.97786 338.12781 258.47005 338.33972 259.08984 338.33972 curveto
+259.70963 338.33972 260.19954 338.12781 260.55957 337.70398 curveto
+260.92415 337.2756 261.10644 336.69454 261.10645 335.96082 curveto
+261.10644 335.22254 260.92415 334.64148 260.55957 334.21765 curveto
+260.19954 333.79383 259.70963 333.58191 259.08984 333.58191 curveto
+261.83105 329.25476 moveto
+261.83105 330.51257 lineto
+261.48469 330.34852 261.13378 330.22319 260.77832 330.1366 curveto
+260.4274 330.05002 260.07877 330.00672 259.73242 330.00671 curveto
+258.82096 330.00672 258.12369 330.31434 257.64062 330.92957 curveto
+257.16211 331.54481 256.88867 332.47449 256.82031 333.71863 curveto
+257.08919 333.32215 257.42643 333.01909 257.83203 332.80945 curveto
+258.23763 332.59526 258.68424 332.48817 259.17188 332.48816 curveto
+260.19726 332.48817 261.00618 332.80034 261.59863 333.42468 curveto
+262.19563 334.04448 262.49413 334.88986 262.49414 335.96082 curveto
+262.49413 337.00899 262.18424 337.84981 261.56445 338.48328 curveto
+260.94465 339.11674 260.11979 339.43347 259.08984 339.43347 curveto
+257.9095 339.43347 257.00716 338.9823 256.38281 338.07996 curveto
+255.75846 337.17306 255.44629 335.86056 255.44629 334.14246 curveto
+255.44629 332.52918 255.8291 331.24403 256.59473 330.28699 curveto
+257.36035 329.32541 258.38802 328.84461 259.67773 328.8446 curveto
+260.02408 328.84461 260.37272 328.87879 260.72363 328.94714 curveto
+261.07909 329.01551 261.44824 329.11805 261.83105 329.25476 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+265.02344 337.4989 moveto
+266.46582 337.4989 lineto
+266.46582 338.67468 lineto
+265.34473 340.86218 lineto
+264.46289 340.86218 lineto
+265.02344 338.67468 lineto
+265.02344 337.4989 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+269.57617 338.07312 moveto
+271.83203 338.07312 lineto
+271.83203 330.28699 lineto
+269.37793 330.77917 lineto
+269.37793 329.52136 lineto
+271.81836 329.02917 lineto
+273.19922 329.02917 lineto
+273.19922 338.07312 lineto
+275.45508 338.07312 lineto
+275.45508 339.23523 lineto
+269.57617 339.23523 lineto
+269.57617 338.07312 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+278.39453 337.4989 moveto
+279.83691 337.4989 lineto
+279.83691 338.67468 lineto
+278.71582 340.86218 lineto
+277.83398 340.86218 lineto
+278.39453 338.67468 lineto
+278.39453 337.4989 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+285.49707 332.46082 moveto
+284.82259 332.46082 284.28938 332.72514 283.89746 333.25378 curveto
+283.50553 333.77788 283.30957 334.49793 283.30957 335.41394 curveto
+283.30957 336.32996 283.50325 337.05229 283.89062 337.58093 curveto
+284.28255 338.10502 284.81803 338.36707 285.49707 338.36707 curveto
+286.16699 338.36707 286.69791 338.10274 287.08984 337.5741 curveto
+287.48176 337.04545 287.67773 336.3254 287.67773 335.41394 curveto
+287.67773 334.50704 287.48176 333.78927 287.08984 333.26062 curveto
+286.69791 332.72742 286.16699 332.46082 285.49707 332.46082 curveto
+285.49707 331.39441 moveto
+286.59081 331.39442 287.44986 331.74989 288.07422 332.46082 curveto
+288.69856 333.17176 289.01073 334.15613 289.01074 335.41394 curveto
+289.01073 336.6672 288.69856 337.65157 288.07422 338.36707 curveto
+287.44986 339.078 286.59081 339.43347 285.49707 339.43347 curveto
+284.39876 339.43347 283.53743 339.078 282.91309 338.36707 curveto
+282.29329 337.65157 281.9834 336.6672 281.9834 335.41394 curveto
+281.9834 334.15613 282.29329 333.17176 282.91309 332.46082 curveto
+283.53743 331.74989 284.39876 331.39442 285.49707 331.39441 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+290.89062 328.61218 moveto
+291.98438 328.61218 lineto
+292.66797 329.68771 293.17838 330.74045 293.51562 331.77039 curveto
+293.85742 332.80034 294.02832 333.82345 294.02832 334.83972 curveto
+294.02832 335.86056 293.85742 336.88823 293.51562 337.92273 curveto
+293.17838 338.95723 292.66797 340.00997 291.98438 341.08093 curveto
+290.89062 341.08093 lineto
+291.49674 340.03731 291.94791 339.00053 292.24414 337.97058 curveto
+292.54492 336.93608 292.69531 335.89246 292.69531 334.83972 curveto
+292.69531 333.78699 292.54492 332.74793 292.24414 331.72253 curveto
+291.94791 330.69715 291.49674 329.66037 290.89062 328.61218 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+188.1377 350.98914 moveto
+187.52701 352.03732 187.07356 353.07411 186.77734 354.09949 curveto
+186.48112 355.12488 186.33301 356.16395 186.33301 357.21667 curveto
+186.33301 358.26941 186.48112 359.31303 186.77734 360.34753 curveto
+187.07812 361.37748 187.53157 362.41427 188.1377 363.45789 curveto
+187.04395 363.45789 lineto
+186.36035 362.38692 185.84765 361.33419 185.50586 360.29968 curveto
+185.16862 359.26518 185 358.23751 185 357.21667 curveto
+185 356.2004 185.16862 355.17729 185.50586 354.14734 curveto
+185.8431 353.1174 186.35579 352.06467 187.04395 350.98914 curveto
+188.1377 350.98914 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+190.41406 351.40613 moveto
+196.97656 351.40613 lineto
+196.97656 351.99402 lineto
+193.27148 361.61218 lineto
+191.8291 361.61218 lineto
+195.31543 352.56824 lineto
+190.41406 352.56824 lineto
+190.41406 351.40613 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+199.82031 359.87585 moveto
+201.2627 359.87585 lineto
+201.2627 361.05164 lineto
+200.1416 363.23914 lineto
+199.25977 363.23914 lineto
+199.82031 361.05164 lineto
+199.82031 359.87585 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+205.32324 360.45007 moveto
+210.14258 360.45007 lineto
+210.14258 361.61218 lineto
+203.66211 361.61218 lineto
+203.66211 360.45007 lineto
+204.1862 359.90776 204.89941 359.18087 205.80176 358.26941 curveto
+206.70865 357.3534 207.27832 356.76323 207.51074 356.4989 curveto
+207.95279 356.00216 208.26041 355.58289 208.43359 355.24109 curveto
+208.61132 354.89474 208.70019 354.55522 208.7002 354.22253 curveto
+208.70019 353.68022 208.50878 353.23817 208.12598 352.89636 curveto
+207.74772 352.55457 207.25325 352.38368 206.64258 352.38367 curveto
+206.20963 352.38368 205.75162 352.45887 205.26855 352.60925 curveto
+204.79004 352.75965 204.27734 352.98752 203.73047 353.29285 curveto
+203.73047 351.89832 lineto
+204.28646 351.67502 204.80599 351.5064 205.28906 351.39246 curveto
+205.77213 351.27853 206.21419 351.22157 206.61523 351.22156 curveto
+207.67252 351.22157 208.51562 351.48589 209.14453 352.01453 curveto
+209.77343 352.54318 210.08788 353.24956 210.08789 354.13367 curveto
+210.08788 354.55294 210.00813 354.95171 209.84863 355.32996 curveto
+209.69368 355.70366 209.40885 356.14572 208.99414 356.65613 curveto
+208.8802 356.78829 208.5179 357.17111 207.90723 357.80457 curveto
+207.29654 358.43347 206.43522 359.31531 205.32324 360.45007 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+213.19141 359.87585 moveto
+214.63379 359.87585 lineto
+214.63379 361.05164 lineto
+213.5127 363.23914 lineto
+212.63086 363.23914 lineto
+213.19141 361.05164 lineto
+213.19141 359.87585 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+220.29395 354.83777 moveto
+219.61946 354.83778 219.08626 355.1021 218.69434 355.63074 curveto
+218.30241 356.15483 218.10644 356.87488 218.10645 357.79089 curveto
+218.10644 358.70691 218.30013 359.42924 218.6875 359.95789 curveto
+219.07942 360.48198 219.61491 360.74402 220.29395 360.74402 curveto
+220.96386 360.74402 221.49479 360.4797 221.88672 359.95105 curveto
+222.27864 359.42241 222.4746 358.70235 222.47461 357.79089 curveto
+222.4746 356.884 222.27864 356.16622 221.88672 355.63757 curveto
+221.49479 355.10438 220.96386 354.83778 220.29395 354.83777 curveto
+220.29395 353.77136 moveto
+221.38769 353.77137 222.24674 354.12684 222.87109 354.83777 curveto
+223.49544 355.54871 223.80761 356.53309 223.80762 357.79089 curveto
+223.80761 359.04415 223.49544 360.02853 222.87109 360.74402 curveto
+222.24674 361.45496 221.38769 361.81042 220.29395 361.81042 curveto
+219.19563 361.81042 218.33431 361.45496 217.70996 360.74402 curveto
+217.09017 360.02853 216.78027 359.04415 216.78027 357.79089 curveto
+216.78027 356.53309 217.09017 355.54871 217.70996 354.83777 curveto
+218.33431 354.12684 219.19563 353.77137 220.29395 353.77136 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+225.6875 350.98914 moveto
+226.78125 350.98914 lineto
+227.46484 352.06467 227.97526 353.1174 228.3125 354.14734 curveto
+228.65429 355.17729 228.82519 356.2004 228.8252 357.21667 curveto
+228.82519 358.23751 228.65429 359.26518 228.3125 360.29968 curveto
+227.97526 361.33419 227.46484 362.38692 226.78125 363.45789 curveto
+225.6875 363.45789 lineto
+226.29362 362.41427 226.74479 361.37748 227.04102 360.34753 curveto
+227.34179 359.31303 227.49218 358.26941 227.49219 357.21667 curveto
+227.49218 356.16395 227.34179 355.12488 227.04102 354.09949 curveto
+226.74479 353.07411 226.29362 352.03732 225.6875 350.98914 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+232.84082 350.48914 moveto
+232.23014 351.53732 231.77669 352.57411 231.48047 353.59949 curveto
+231.18424 354.62488 231.03613 355.66395 231.03613 356.71667 curveto
+231.03613 357.76941 231.18424 358.81303 231.48047 359.84753 curveto
+231.78125 360.87748 232.2347 361.91427 232.84082 362.95789 curveto
+231.74707 362.95789 lineto
+231.06347 361.88692 230.55078 360.83419 230.20898 359.79968 curveto
+229.87174 358.76518 229.70312 357.73751 229.70312 356.71667 curveto
+229.70312 355.7004 229.87174 354.67729 230.20898 353.64734 curveto
+230.54622 352.6174 231.05892 351.56467 231.74707 350.48914 curveto
+232.84082 350.48914 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+238.41895 356.2655 moveto
+237.76269 356.26551 237.24544 356.44096 236.86719 356.79187 curveto
+236.49349 357.14279 236.30664 357.62586 236.30664 358.24109 curveto
+236.30664 358.85633 236.49349 359.3394 236.86719 359.69031 curveto
+237.24544 360.04122 237.76269 360.21668 238.41895 360.21667 curveto
+239.07519 360.21668 239.59244 360.04122 239.9707 359.69031 curveto
+240.34895 359.33484 240.53808 358.85177 240.53809 358.24109 curveto
+240.53808 357.62586 240.34895 357.14279 239.9707 356.79187 curveto
+239.597 356.44096 239.07975 356.26551 238.41895 356.2655 curveto
+237.03809 355.67761 moveto
+236.44564 355.53178 235.98307 355.25607 235.65039 354.85046 curveto
+235.32226 354.44487 235.1582 353.95041 235.1582 353.36707 curveto
+235.1582 352.55132 235.44759 351.90646 236.02637 351.4325 curveto
+236.6097 350.95855 237.40722 350.72157 238.41895 350.72156 curveto
+239.43522 350.72157 240.23274 350.95855 240.81152 351.4325 curveto
+241.39029 351.90646 241.67968 352.55132 241.67969 353.36707 curveto
+241.67968 353.95041 241.51334 354.44487 241.18066 354.85046 curveto
+240.85253 355.25607 240.39452 355.53178 239.80664 355.67761 curveto
+240.472 355.83257 240.98925 356.13563 241.3584 356.58679 curveto
+241.73209 357.03797 241.91894 357.5894 241.91895 358.24109 curveto
+241.91894 359.23002 241.61588 359.98881 241.00977 360.51746 curveto
+240.4082 361.0461 239.54459 361.31042 238.41895 361.31042 curveto
+237.29329 361.31042 236.42741 361.0461 235.82129 360.51746 curveto
+235.21973 359.98881 234.91894 359.23002 234.91895 358.24109 curveto
+234.91894 357.5894 235.10579 357.03797 235.47949 356.58679 curveto
+235.85319 356.13563 236.37272 355.83257 237.03809 355.67761 curveto
+236.53223 353.49695 moveto
+236.53222 354.0256 236.69629 354.43804 237.02441 354.73425 curveto
+237.35709 355.03048 237.82194 355.17859 238.41895 355.17859 curveto
+239.01139 355.17859 239.47395 355.03048 239.80664 354.73425 curveto
+240.14387 354.43804 240.31249 354.0256 240.3125 353.49695 curveto
+240.31249 352.96831 240.14387 352.55588 239.80664 352.25964 curveto
+239.47395 351.96343 239.01139 351.81532 238.41895 351.81531 curveto
+237.82194 351.81532 237.35709 351.96343 237.02441 352.25964 curveto
+236.69629 352.55588 236.53222 352.96831 236.53223 353.49695 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+244.52344 359.37585 moveto
+245.96582 359.37585 lineto
+245.96582 360.55164 lineto
+244.84473 362.73914 lineto
+243.96289 362.73914 lineto
+244.52344 360.55164 lineto
+244.52344 359.37585 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+253.02051 355.60925 moveto
+253.68131 355.75053 254.19628 356.04448 254.56543 356.49109 curveto
+254.93912 356.93771 255.12597 357.48914 255.12598 358.14539 curveto
+255.12597 359.15255 254.77961 359.93185 254.08691 360.48328 curveto
+253.3942 361.03471 252.40983 361.31042 251.13379 361.31042 curveto
+250.7054 361.31042 250.26334 361.26713 249.80762 361.18054 curveto
+249.35644 361.09851 248.88932 360.97319 248.40625 360.80457 curveto
+248.40625 359.47156 lineto
+248.78906 359.69487 249.20833 359.86349 249.66406 359.97742 curveto
+250.11979 360.09135 250.59603 360.14832 251.09277 360.14832 curveto
+251.95865 360.14832 252.61718 359.97742 253.06836 359.63562 curveto
+253.52408 359.29383 253.75195 358.79708 253.75195 358.14539 curveto
+253.75195 357.54383 253.54003 357.07443 253.11621 356.73718 curveto
+252.69693 356.39539 252.11132 356.22449 251.35938 356.22449 curveto
+250.16992 356.22449 lineto
+250.16992 355.08972 lineto
+251.41406 355.08972 lineto
+252.09309 355.08973 252.61262 354.95529 252.97266 354.6864 curveto
+253.33268 354.41297 253.51269 354.02104 253.5127 353.51062 curveto
+253.51269 352.98654 253.32584 352.5855 252.95215 352.3075 curveto
+252.583 352.02495 252.05208 351.88368 251.35938 351.88367 curveto
+250.98112 351.88368 250.57552 351.92469 250.14258 352.00671 curveto
+249.70963 352.08875 249.2334 352.21636 248.71387 352.38953 curveto
+248.71387 351.15906 lineto
+249.23795 351.01323 249.72786 350.90386 250.18359 350.83093 curveto
+250.64388 350.75803 251.07682 350.72157 251.48242 350.72156 curveto
+252.53059 350.72157 253.36002 350.96083 253.9707 351.43933 curveto
+254.58137 351.9133 254.88671 352.55588 254.88672 353.36707 curveto
+254.88671 353.93218 254.72493 354.41069 254.40137 354.80261 curveto
+254.07779 355.18999 253.61751 355.45887 253.02051 355.60925 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+257.89453 359.37585 moveto
+259.33691 359.37585 lineto
+259.33691 360.55164 lineto
+258.21582 362.73914 lineto
+257.33398 362.73914 lineto
+257.89453 360.55164 lineto
+257.89453 359.37585 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+264.99707 354.33777 moveto
+264.32259 354.33778 263.78938 354.6021 263.39746 355.13074 curveto
+263.00553 355.65483 262.80957 356.37488 262.80957 357.29089 curveto
+262.80957 358.20691 263.00325 358.92924 263.39062 359.45789 curveto
+263.78255 359.98198 264.31803 360.24402 264.99707 360.24402 curveto
+265.66699 360.24402 266.19791 359.9797 266.58984 359.45105 curveto
+266.98176 358.92241 267.17773 358.20235 267.17773 357.29089 curveto
+267.17773 356.384 266.98176 355.66622 266.58984 355.13757 curveto
+266.19791 354.60438 265.66699 354.33778 264.99707 354.33777 curveto
+264.99707 353.27136 moveto
+266.09081 353.27137 266.94986 353.62684 267.57422 354.33777 curveto
+268.19856 355.04871 268.51073 356.03309 268.51074 357.29089 curveto
+268.51073 358.54415 268.19856 359.52853 267.57422 360.24402 curveto
+266.94986 360.95496 266.09081 361.31042 264.99707 361.31042 curveto
+263.89876 361.31042 263.03743 360.95496 262.41309 360.24402 curveto
+261.79329 359.52853 261.4834 358.54415 261.4834 357.29089 curveto
+261.4834 356.03309 261.79329 355.04871 262.41309 354.33777 curveto
+263.03743 353.62684 263.89876 353.27137 264.99707 353.27136 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+270.39062 350.48914 moveto
+271.48438 350.48914 lineto
+272.16797 351.56467 272.67838 352.6174 273.01562 353.64734 curveto
+273.35742 354.67729 273.52832 355.7004 273.52832 356.71667 curveto
+273.52832 357.73751 273.35742 358.76518 273.01562 359.79968 curveto
+272.67838 360.83419 272.16797 361.88692 271.48438 362.95789 curveto
+270.39062 362.95789 lineto
+270.99674 361.91427 271.44791 360.87748 271.74414 359.84753 curveto
+272.04492 358.81303 272.19531 357.76941 272.19531 356.71667 curveto
+272.19531 355.66395 272.04492 354.62488 271.74414 353.59949 curveto
+271.44791 352.57411 270.99674 351.53732 270.39062 350.48914 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+278.84082 350.64343 moveto
+278.23014 351.69162 277.77669 352.7284 277.48047 353.75378 curveto
+277.18424 354.77918 277.03613 355.81824 277.03613 356.87097 curveto
+277.03613 357.92371 277.18424 358.96733 277.48047 360.00183 curveto
+277.78125 361.03178 278.2347 362.06856 278.84082 363.11218 curveto
+277.74707 363.11218 lineto
+277.06347 362.04122 276.55078 360.98848 276.20898 359.95398 curveto
+275.87174 358.91948 275.70312 357.89181 275.70312 356.87097 curveto
+275.70312 355.8547 275.87174 354.83159 276.20898 353.80164 curveto
+276.54622 352.7717 277.05892 351.71896 277.74707 350.64343 curveto
+278.84082 350.64343 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+281.50684 361.05457 moveto
+281.50684 359.79675 lineto
+281.85319 359.96082 282.2041 360.08614 282.55957 360.17273 curveto
+282.91504 360.25932 283.26367 360.30261 283.60547 360.30261 curveto
+284.51692 360.30261 285.21191 359.99728 285.69043 359.3866 curveto
+286.1735 358.77136 286.44921 357.8394 286.51758 356.5907 curveto
+286.25325 356.98263 285.91829 357.28341 285.5127 357.49304 curveto
+285.10709 357.70268 284.6582 357.8075 284.16602 357.8075 curveto
+283.14518 357.8075 282.33626 357.49988 281.73926 356.88464 curveto
+281.14681 356.26486 280.85059 355.41948 280.85059 354.34851 curveto
+280.85059 353.30034 281.16048 352.45952 281.78027 351.82605 curveto
+282.40006 351.1926 283.22493 350.87586 284.25488 350.87585 curveto
+285.43522 350.87586 286.33528 351.32931 286.95508 352.23621 curveto
+287.57942 353.13856 287.89159 354.45106 287.8916 356.17371 curveto
+287.89159 357.78243 287.50878 359.06759 286.74316 360.02917 curveto
+285.98209 360.98621 284.9567 361.46472 283.66699 361.46472 curveto
+283.32063 361.46472 282.96972 361.43054 282.61426 361.36218 curveto
+282.25879 361.29382 281.88965 361.19128 281.50684 361.05457 curveto
+284.25488 356.72742 moveto
+284.87467 356.72742 285.36458 356.51551 285.72461 356.09167 curveto
+286.08919 355.66785 286.27148 355.0868 286.27148 354.34851 curveto
+286.27148 353.61479 286.08919 353.03602 285.72461 352.61218 curveto
+285.36458 352.18381 284.87467 351.96961 284.25488 351.9696 curveto
+283.63509 351.96961 283.1429 352.18381 282.77832 352.61218 curveto
+282.41829 353.03602 282.23828 353.61479 282.23828 354.34851 curveto
+282.23828 355.0868 282.41829 355.66785 282.77832 356.09167 curveto
+283.1429 356.51551 283.63509 356.72742 284.25488 356.72742 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+290.52344 359.53015 moveto
+291.96582 359.53015 lineto
+291.96582 360.70593 lineto
+290.84473 362.89343 lineto
+289.96289 362.89343 lineto
+290.52344 360.70593 lineto
+290.52344 359.53015 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+298.63086 352.26355 moveto
+295.14453 357.71179 lineto
+298.63086 357.71179 lineto
+298.63086 352.26355 lineto
+298.26855 351.06042 moveto
+300.00488 351.06042 lineto
+300.00488 357.71179 lineto
+301.46094 357.71179 lineto
+301.46094 358.86023 lineto
+300.00488 358.86023 lineto
+300.00488 361.26648 lineto
+298.63086 361.26648 lineto
+298.63086 358.86023 lineto
+294.02344 358.86023 lineto
+294.02344 357.52722 lineto
+298.26855 351.06042 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+303.89453 359.53015 moveto
+305.33691 359.53015 lineto
+305.33691 360.70593 lineto
+304.21582 362.89343 lineto
+303.33398 362.89343 lineto
+303.89453 360.70593 lineto
+303.89453 359.53015 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+310.99707 354.49207 moveto
+310.32259 354.49207 309.78938 354.75639 309.39746 355.28503 curveto
+309.00553 355.80913 308.80957 356.52918 308.80957 357.44519 curveto
+308.80957 358.36121 309.00325 359.08354 309.39062 359.61218 curveto
+309.78255 360.13627 310.31803 360.39832 310.99707 360.39832 curveto
+311.66699 360.39832 312.19791 360.13399 312.58984 359.60535 curveto
+312.98176 359.0767 313.17773 358.35665 313.17773 357.44519 curveto
+313.17773 356.53829 312.98176 355.82052 312.58984 355.29187 curveto
+312.19791 354.75867 311.66699 354.49207 310.99707 354.49207 curveto
+310.99707 353.42566 moveto
+312.09081 353.42567 312.94986 353.78114 313.57422 354.49207 curveto
+314.19856 355.20301 314.51073 356.18738 314.51074 357.44519 curveto
+314.51073 358.69845 314.19856 359.68282 313.57422 360.39832 curveto
+312.94986 361.10925 312.09081 361.46472 310.99707 361.46472 curveto
+309.89876 361.46472 309.03743 361.10925 308.41309 360.39832 curveto
+307.79329 359.68282 307.4834 358.69845 307.4834 357.44519 curveto
+307.4834 356.18738 307.79329 355.20301 308.41309 354.49207 curveto
+309.03743 353.78114 309.89876 353.42567 310.99707 353.42566 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+316.39062 350.64343 moveto
+317.48438 350.64343 lineto
+318.16797 351.71896 318.67838 352.7717 319.01562 353.80164 curveto
+319.35742 354.83159 319.52832 355.8547 319.52832 356.87097 curveto
+319.52832 357.89181 319.35742 358.91948 319.01562 359.95398 curveto
+318.67838 360.98848 318.16797 362.04122 317.48438 363.11218 curveto
+316.39062 363.11218 lineto
+316.99674 362.06856 317.44791 361.03178 317.74414 360.00183 curveto
+318.04492 358.96733 318.19531 357.92371 318.19531 356.87097 curveto
+318.19531 355.81824 318.04492 354.77918 317.74414 353.75378 curveto
+317.44791 352.7284 316.99674 351.69162 316.39062 350.64343 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+197.1377 371.14343 moveto
+196.52701 372.19162 196.07356 373.2284 195.77734 374.25378 curveto
+195.48112 375.27918 195.33301 376.31824 195.33301 377.37097 curveto
+195.33301 378.42371 195.48112 379.46733 195.77734 380.50183 curveto
+196.07812 381.53178 196.53157 382.56856 197.1377 383.61218 curveto
+196.04395 383.61218 lineto
+195.36035 382.54122 194.84765 381.48848 194.50586 380.45398 curveto
+194.16862 379.41948 194 378.39181 194 377.37097 curveto
+194 376.3547 194.16862 375.33159 194.50586 374.30164 curveto
+194.8431 373.2717 195.35579 372.21896 196.04395 371.14343 curveto
+197.1377 371.14343 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+200.00195 380.60437 moveto
+202.25781 380.60437 lineto
+202.25781 372.81824 lineto
+199.80371 373.31042 lineto
+199.80371 372.05261 lineto
+202.24414 371.56042 lineto
+203.625 371.56042 lineto
+203.625 380.60437 lineto
+205.88086 380.60437 lineto
+205.88086 381.76648 lineto
+200.00195 381.76648 lineto
+200.00195 380.60437 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+211.62988 372.4696 moveto
+210.91894 372.46961 210.38346 372.82052 210.02344 373.52234 curveto
+209.66797 374.21961 209.49023 375.27007 209.49023 376.67371 curveto
+209.49023 378.0728 209.66797 379.12325 210.02344 379.82507 curveto
+210.38346 380.52234 210.91894 380.87097 211.62988 380.87097 curveto
+212.34537 380.87097 212.88085 380.52234 213.23633 379.82507 curveto
+213.59635 379.12325 213.77636 378.0728 213.77637 376.67371 curveto
+213.77636 375.27007 213.59635 374.21961 213.23633 373.52234 curveto
+212.88085 372.82052 212.34537 372.46961 211.62988 372.4696 curveto
+211.62988 371.37585 moveto
+212.77376 371.37586 213.64648 371.82931 214.24805 372.73621 curveto
+214.85416 373.63856 215.15722 374.95106 215.15723 376.67371 curveto
+215.15722 378.39181 214.85416 379.70431 214.24805 380.61121 curveto
+213.64648 381.51355 212.77376 381.96472 211.62988 381.96472 curveto
+210.486 381.96472 209.611 381.51355 209.00488 380.61121 curveto
+208.40332 379.70431 208.10254 378.39181 208.10254 376.67371 curveto
+208.10254 374.95106 208.40332 373.63856 209.00488 372.73621 curveto
+209.611 371.82931 210.486 371.37586 211.62988 371.37585 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+217.73438 380.03015 moveto
+219.17676 380.03015 lineto
+219.17676 381.20593 lineto
+218.05566 383.39343 lineto
+217.17383 383.39343 lineto
+217.73438 381.20593 lineto
+217.73438 380.03015 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+222.06152 371.56042 moveto
+227.48242 371.56042 lineto
+227.48242 372.72253 lineto
+223.32617 372.72253 lineto
+223.32617 375.22449 lineto
+223.52669 375.15613 223.72721 375.106 223.92773 375.0741 curveto
+224.12825 375.03765 224.32877 375.01942 224.5293 375.01941 curveto
+225.66861 375.01942 226.57096 375.33159 227.23633 375.95593 curveto
+227.90169 376.58029 228.23437 377.42566 228.23438 378.49207 curveto
+228.23437 379.59037 227.89257 380.44487 227.20898 381.05554 curveto
+226.52538 381.66166 225.56152 381.96472 224.31738 381.96472 curveto
+223.88899 381.96472 223.45149 381.92826 223.00488 381.85535 curveto
+222.56282 381.78243 222.10482 381.67306 221.63086 381.52722 curveto
+221.63086 380.13953 lineto
+222.04101 380.36284 222.46484 380.52918 222.90234 380.63855 curveto
+223.33984 380.74793 223.80241 380.80261 224.29004 380.80261 curveto
+225.07845 380.80261 225.70279 380.59526 226.16309 380.18054 curveto
+226.62337 379.76583 226.85351 379.20301 226.85352 378.49207 curveto
+226.85351 377.78113 226.62337 377.21831 226.16309 376.80359 curveto
+225.70279 376.38888 225.07845 376.18152 224.29004 376.18152 curveto
+223.9209 376.18152 223.55175 376.22254 223.18262 376.30457 curveto
+222.81803 376.3866 222.44433 376.51421 222.06152 376.68738 curveto
+222.06152 371.56042 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+231.10547 380.03015 moveto
+232.54785 380.03015 lineto
+232.54785 381.20593 lineto
+231.42676 383.39343 lineto
+230.54492 383.39343 lineto
+231.10547 381.20593 lineto
+231.10547 380.03015 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+238.20801 374.99207 moveto
+237.53353 374.99207 237.00032 375.25639 236.6084 375.78503 curveto
+236.21647 376.30913 236.02051 377.02918 236.02051 377.94519 curveto
+236.02051 378.86121 236.21419 379.58354 236.60156 380.11218 curveto
+236.99349 380.63627 237.52897 380.89832 238.20801 380.89832 curveto
+238.87792 380.89832 239.40885 380.63399 239.80078 380.10535 curveto
+240.1927 379.5767 240.38867 378.85665 240.38867 377.94519 curveto
+240.38867 377.03829 240.1927 376.32052 239.80078 375.79187 curveto
+239.40885 375.25867 238.87792 374.99207 238.20801 374.99207 curveto
+238.20801 373.92566 moveto
+239.30175 373.92567 240.1608 374.28114 240.78516 374.99207 curveto
+241.4095 375.70301 241.72167 376.68738 241.72168 377.94519 curveto
+241.72167 379.19845 241.4095 380.18282 240.78516 380.89832 curveto
+240.1608 381.60925 239.30175 381.96472 238.20801 381.96472 curveto
+237.1097 381.96472 236.24837 381.60925 235.62402 380.89832 curveto
+235.00423 380.18282 234.69434 379.19845 234.69434 377.94519 curveto
+234.69434 376.68738 235.00423 375.70301 235.62402 374.99207 curveto
+236.24837 374.28114 237.1097 373.92567 238.20801 373.92566 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+243.60156 371.14343 moveto
+244.69531 371.14343 lineto
+245.3789 372.21896 245.88932 373.2717 246.22656 374.30164 curveto
+246.56836 375.33159 246.73925 376.3547 246.73926 377.37097 curveto
+246.73925 378.39181 246.56836 379.41948 246.22656 380.45398 curveto
+245.88932 381.48848 245.3789 382.54122 244.69531 383.61218 curveto
+243.60156 383.61218 lineto
+244.20768 382.56856 244.65885 381.53178 244.95508 380.50183 curveto
+245.25586 379.46733 245.40625 378.42371 245.40625 377.37097 curveto
+245.40625 376.31824 245.25586 375.27918 244.95508 374.25378 curveto
+244.65885 373.2284 244.20768 372.19162 243.60156 371.14343 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+252.1377 371.98914 moveto
+251.52701 373.03732 251.07356 374.07411 250.77734 375.09949 curveto
+250.48112 376.12488 250.33301 377.16395 250.33301 378.21667 curveto
+250.33301 379.26941 250.48112 380.31303 250.77734 381.34753 curveto
+251.07812 382.37748 251.53157 383.41427 252.1377 384.45789 curveto
+251.04395 384.45789 lineto
+250.36035 383.38692 249.84765 382.33419 249.50586 381.29968 curveto
+249.16862 380.26518 249 379.23751 249 378.21667 curveto
+249 377.2004 249.16862 376.17729 249.50586 375.14734 curveto
+249.8431 374.1174 250.35579 373.06467 251.04395 371.98914 curveto
+252.1377 371.98914 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+255.00195 381.45007 moveto
+257.25781 381.45007 lineto
+257.25781 373.66394 lineto
+254.80371 374.15613 lineto
+254.80371 372.89832 lineto
+257.24414 372.40613 lineto
+258.625 372.40613 lineto
+258.625 381.45007 lineto
+260.88086 381.45007 lineto
+260.88086 382.61218 lineto
+255.00195 382.61218 lineto
+255.00195 381.45007 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+263.91602 381.45007 moveto
+266.17188 381.45007 lineto
+266.17188 373.66394 lineto
+263.71777 374.15613 lineto
+263.71777 372.89832 lineto
+266.1582 372.40613 lineto
+267.53906 372.40613 lineto
+267.53906 381.45007 lineto
+269.79492 381.45007 lineto
+269.79492 382.61218 lineto
+263.91602 382.61218 lineto
+263.91602 381.45007 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+272.73438 380.87585 moveto
+274.17676 380.87585 lineto
+274.17676 382.05164 lineto
+273.05566 384.23914 lineto
+272.17383 384.23914 lineto
+272.73438 382.05164 lineto
+272.73438 380.87585 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+280.17188 376.95886 moveto
+279.55208 376.95887 279.05989 377.17078 278.69531 377.5946 curveto
+278.33528 378.01844 278.15527 378.59949 278.15527 379.33777 curveto
+278.15527 380.0715 278.33528 380.65255 278.69531 381.08093 curveto
+279.05989 381.50476 279.55208 381.71668 280.17188 381.71667 curveto
+280.79166 381.71668 281.28157 381.50476 281.6416 381.08093 curveto
+282.00618 380.65255 282.18847 380.0715 282.18848 379.33777 curveto
+282.18847 378.59949 282.00618 378.01844 281.6416 377.5946 curveto
+281.28157 377.17078 280.79166 376.95887 280.17188 376.95886 curveto
+282.91309 372.63171 moveto
+282.91309 373.88953 lineto
+282.56672 373.72547 282.21581 373.60015 281.86035 373.51355 curveto
+281.50943 373.42697 281.1608 373.38368 280.81445 373.38367 curveto
+279.90299 373.38368 279.20573 373.69129 278.72266 374.30652 curveto
+278.24414 374.92176 277.9707 375.85145 277.90234 377.09558 curveto
+278.17122 376.6991 278.50846 376.39604 278.91406 376.1864 curveto
+279.31966 375.97222 279.76627 375.86512 280.25391 375.86511 curveto
+281.27929 375.86512 282.08821 376.17729 282.68066 376.80164 curveto
+283.27766 377.42143 283.57616 378.26681 283.57617 379.33777 curveto
+283.57616 380.38595 283.26627 381.22677 282.64648 381.86023 curveto
+282.02669 382.49369 281.20182 382.81042 280.17188 382.81042 curveto
+278.99153 382.81042 278.08919 382.35925 277.46484 381.45691 curveto
+276.84049 380.55001 276.52832 379.23751 276.52832 377.51941 curveto
+276.52832 375.90613 276.91113 374.62098 277.67676 373.66394 curveto
+278.44238 372.70236 279.47005 372.22157 280.75977 372.22156 curveto
+281.10611 372.22157 281.45475 372.25575 281.80566 372.3241 curveto
+282.16113 372.39247 282.53027 372.49501 282.91309 372.63171 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+286.10547 380.87585 moveto
+287.54785 380.87585 lineto
+287.54785 382.05164 lineto
+286.42676 384.23914 lineto
+285.54492 384.23914 lineto
+286.10547 382.05164 lineto
+286.10547 380.87585 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+293.20801 375.83777 moveto
+292.53353 375.83778 292.00032 376.1021 291.6084 376.63074 curveto
+291.21647 377.15483 291.02051 377.87488 291.02051 378.79089 curveto
+291.02051 379.70691 291.21419 380.42924 291.60156 380.95789 curveto
+291.99349 381.48198 292.52897 381.74402 293.20801 381.74402 curveto
+293.87792 381.74402 294.40885 381.4797 294.80078 380.95105 curveto
+295.1927 380.42241 295.38867 379.70235 295.38867 378.79089 curveto
+295.38867 377.884 295.1927 377.16622 294.80078 376.63757 curveto
+294.40885 376.10438 293.87792 375.83778 293.20801 375.83777 curveto
+293.20801 374.77136 moveto
+294.30175 374.77137 295.1608 375.12684 295.78516 375.83777 curveto
+296.4095 376.54871 296.72167 377.53309 296.72168 378.79089 curveto
+296.72167 380.04415 296.4095 381.02853 295.78516 381.74402 curveto
+295.1608 382.45496 294.30175 382.81042 293.20801 382.81042 curveto
+292.1097 382.81042 291.24837 382.45496 290.62402 381.74402 curveto
+290.00423 381.02853 289.69434 380.04415 289.69434 378.79089 curveto
+289.69434 377.53309 290.00423 376.54871 290.62402 375.83777 curveto
+291.24837 375.12684 292.1097 374.77137 293.20801 374.77136 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+298.60156 371.98914 moveto
+299.69531 371.98914 lineto
+300.3789 373.06467 300.88932 374.1174 301.22656 375.14734 curveto
+301.56836 376.17729 301.73925 377.2004 301.73926 378.21667 curveto
+301.73925 379.23751 301.56836 380.26518 301.22656 381.29968 curveto
+300.88932 382.33419 300.3789 383.38692 299.69531 384.45789 curveto
+298.60156 384.45789 lineto
+299.20768 383.41427 299.65885 382.37748 299.95508 381.34753 curveto
+300.25586 380.31303 300.40625 379.26941 300.40625 378.21667 curveto
+300.40625 377.16395 300.25586 376.12488 299.95508 375.09949 curveto
+299.65885 374.07411 299.20768 373.03732 298.60156 371.98914 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+238.37402 312.40613 moveto
+239.75488 312.40613 lineto
+239.75488 322.61218 lineto
+238.37402 322.61218 lineto
+238.37402 312.40613 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+247.3291 315.18152 moveto
+247.3291 316.37097 lineto
+246.97363 316.18869 246.60449 316.05197 246.22168 315.96082 curveto
+245.83886 315.86968 245.44238 315.8241 245.03223 315.8241 curveto
+244.40787 315.8241 243.93847 315.91981 243.62402 316.11121 curveto
+243.31413 316.30262 243.15918 316.58973 243.15918 316.97253 curveto
+243.15918 317.26421 243.27083 317.49435 243.49414 317.66296 curveto
+243.71745 317.82703 244.16634 317.98426 244.84082 318.13464 curveto
+245.27148 318.23035 lineto
+246.16471 318.42176 246.79817 318.69292 247.17188 319.04382 curveto
+247.55012 319.39018 247.73925 319.87553 247.73926 320.49988 curveto
+247.73925 321.21082 247.4567 321.77364 246.8916 322.18835 curveto
+246.33105 322.60307 245.55859 322.81042 244.57422 322.81042 curveto
+244.16406 322.81042 243.73567 322.76941 243.28906 322.68738 curveto
+242.847 322.6099 242.37988 322.49141 241.8877 322.33191 curveto
+241.8877 321.03308 lineto
+242.35254 321.27462 242.81055 321.45691 243.26172 321.57996 curveto
+243.71289 321.69845 244.1595 321.75769 244.60156 321.75769 curveto
+245.19401 321.75769 245.64974 321.65743 245.96875 321.45691 curveto
+246.28776 321.25183 246.44726 320.96472 246.44727 320.59558 curveto
+246.44726 320.25379 246.33105 319.99174 246.09863 319.80945 curveto
+245.87076 319.62716 245.36718 319.4517 244.58789 319.28308 curveto
+244.15039 319.18054 lineto
+243.37109 319.01648 242.80827 318.76583 242.46191 318.42859 curveto
+242.11556 318.0868 241.94238 317.61967 241.94238 317.02722 curveto
+241.94238 316.30718 242.19759 315.75119 242.70801 315.35925 curveto
+243.21842 314.96733 243.94303 314.77137 244.88184 314.77136 curveto
+245.34668 314.77137 245.78418 314.80555 246.19434 314.8739 curveto
+246.60449 314.94227 246.98274 315.04481 247.3291 315.18152 curveto
+fill
+grestore
+gsave [1 0 0 1 3.250285 1] concat
+gsave
+0 0 0 setrgbcolor
+newpath
+180.84082 436.23914 moveto
+180.23014 437.28732 179.77669 438.32411 179.48047 439.34949 curveto
+179.18424 440.37488 179.03613 441.41395 179.03613 442.46667 curveto
+179.03613 443.51941 179.18424 444.56303 179.48047 445.59753 curveto
+179.78125 446.62748 180.2347 447.66427 180.84082 448.70789 curveto
+179.74707 448.70789 lineto
+179.06347 447.63692 178.55078 446.58419 178.20898 445.54968 curveto
+177.87174 444.51518 177.70312 443.48751 177.70312 442.46667 curveto
+177.70312 441.4504 177.87174 440.42729 178.20898 439.39734 curveto
+178.54622 438.3674 179.05892 437.31467 179.74707 436.23914 curveto
+180.84082 436.23914 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+184.65527 445.70007 moveto
+189.47461 445.70007 lineto
+189.47461 446.86218 lineto
+182.99414 446.86218 lineto
+182.99414 445.70007 lineto
+183.51823 445.15776 184.23144 444.43087 185.13379 443.51941 curveto
+186.04069 442.6034 186.61035 442.01323 186.84277 441.7489 curveto
+187.28483 441.25216 187.59244 440.83289 187.76562 440.49109 curveto
+187.94335 440.14474 188.03222 439.80522 188.03223 439.47253 curveto
+188.03222 438.93022 187.84081 438.48817 187.45801 438.14636 curveto
+187.07975 437.80457 186.58528 437.63368 185.97461 437.63367 curveto
+185.54166 437.63368 185.08366 437.70887 184.60059 437.85925 curveto
+184.12207 438.00965 183.60937 438.23752 183.0625 438.54285 curveto
+183.0625 437.14832 lineto
+183.61849 436.92502 184.13802 436.7564 184.62109 436.64246 curveto
+185.10416 436.52853 185.54622 436.47157 185.94727 436.47156 curveto
+187.00455 436.47157 187.84765 436.73589 188.47656 437.26453 curveto
+189.10546 437.79318 189.41991 438.49956 189.41992 439.38367 curveto
+189.41991 439.80294 189.34016 440.20171 189.18066 440.57996 curveto
+189.02571 440.95366 188.74088 441.39572 188.32617 441.90613 curveto
+188.21223 442.03829 187.84993 442.42111 187.23926 443.05457 curveto
+186.62858 443.68347 185.76725 444.56531 184.65527 445.70007 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+192.52344 445.12585 moveto
+193.96582 445.12585 lineto
+193.96582 446.30164 lineto
+192.84473 448.48914 lineto
+191.96289 448.48914 lineto
+192.52344 446.30164 lineto
+192.52344 445.12585 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+199.79004 437.56531 moveto
+199.0791 437.56532 198.54362 437.91623 198.18359 438.61804 curveto
+197.82812 439.31532 197.65039 440.36577 197.65039 441.76941 curveto
+197.65039 443.1685 197.82812 444.21896 198.18359 444.92078 curveto
+198.54362 445.61804 199.0791 445.96668 199.79004 445.96667 curveto
+200.50553 445.96668 201.04101 445.61804 201.39648 444.92078 curveto
+201.7565 444.21896 201.93652 443.1685 201.93652 441.76941 curveto
+201.93652 440.36577 201.7565 439.31532 201.39648 438.61804 curveto
+201.04101 437.91623 200.50553 437.56532 199.79004 437.56531 curveto
+199.79004 436.47156 moveto
+200.93391 436.47157 201.80663 436.92502 202.4082 437.83191 curveto
+203.01432 438.73426 203.31737 440.04676 203.31738 441.76941 curveto
+203.31737 443.48751 203.01432 444.80001 202.4082 445.70691 curveto
+201.80663 446.60925 200.93391 447.06042 199.79004 447.06042 curveto
+198.64616 447.06042 197.77116 446.60925 197.16504 445.70691 curveto
+196.56348 444.80001 196.26269 443.48751 196.2627 441.76941 curveto
+196.26269 440.04676 196.56348 438.73426 197.16504 437.83191 curveto
+197.77116 436.92502 198.64616 436.47157 199.79004 436.47156 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+205.89453 445.12585 moveto
+207.33691 445.12585 lineto
+207.33691 446.30164 lineto
+206.21582 448.48914 lineto
+205.33398 448.48914 lineto
+205.89453 446.30164 lineto
+205.89453 445.12585 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+215.06836 442.94519 moveto
+215.06835 442.03374 214.87923 441.32736 214.50098 440.82605 curveto
+214.12727 440.32475 213.60091 440.0741 212.92188 440.0741 curveto
+212.24739 440.0741 211.72103 440.32475 211.34277 440.82605 curveto
+210.96907 441.32736 210.78222 442.03374 210.78223 442.94519 curveto
+210.78222 443.85209 210.96907 444.5562 211.34277 445.0575 curveto
+211.72103 445.5588 212.24739 445.80945 212.92188 445.80945 curveto
+213.60091 445.80945 214.12727 445.5588 214.50098 445.0575 curveto
+214.87923 444.5562 215.06835 443.85209 215.06836 442.94519 curveto
+216.32617 445.91199 moveto
+216.32616 447.21537 216.03678 448.1838 215.45801 448.81726 curveto
+214.87923 449.45528 213.99283 449.77429 212.79883 449.77429 curveto
+212.35677 449.77429 211.93978 449.74011 211.54785 449.67175 curveto
+211.15592 449.60795 210.77539 449.50769 210.40625 449.37097 curveto
+210.40625 448.14734 lineto
+210.77539 448.34786 211.13997 448.49597 211.5 448.59167 curveto
+211.86002 448.68738 212.22688 448.73523 212.60059 448.73523 curveto
+213.42545 448.73523 214.04296 448.51876 214.45312 448.08582 curveto
+214.86328 447.65743 215.06835 447.00802 215.06836 446.13757 curveto
+215.06836 445.5155 lineto
+214.80859 445.96668 214.47591 446.30391 214.07031 446.52722 curveto
+213.66471 446.75053 213.17936 446.86218 212.61426 446.86218 curveto
+211.67545 446.86218 210.91894 446.50444 210.34473 445.78894 curveto
+209.77051 445.07345 209.4834 444.12553 209.4834 442.94519 curveto
+209.4834 441.7603 209.77051 440.81011 210.34473 440.0946 curveto
+210.91894 439.37912 211.67545 439.02137 212.61426 439.02136 curveto
+213.17936 439.02137 213.66471 439.13302 214.07031 439.35632 curveto
+214.47591 439.57964 214.80859 439.91688 215.06836 440.36804 curveto
+215.06836 439.20593 lineto
+216.32617 439.20593 lineto
+216.32617 445.91199 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+218.71875 436.23914 moveto
+219.8125 436.23914 lineto
+220.49609 437.31467 221.00651 438.3674 221.34375 439.39734 curveto
+221.68554 440.42729 221.85644 441.4504 221.85645 442.46667 curveto
+221.85644 443.48751 221.68554 444.51518 221.34375 445.54968 curveto
+221.00651 446.58419 220.49609 447.63692 219.8125 448.70789 curveto
+218.71875 448.70789 lineto
+219.32487 447.66427 219.77604 446.62748 220.07227 445.59753 curveto
+220.37304 444.56303 220.52343 443.51941 220.52344 442.46667 curveto
+220.52343 441.41395 220.37304 440.37488 220.07227 439.34949 curveto
+219.77604 438.32411 219.32487 437.28732 218.71875 436.23914 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+228.53711 436.77234 moveto
+227.92643 437.82053 227.47298 438.85731 227.17676 439.88269 curveto
+226.88053 440.90809 226.73242 441.94715 226.73242 442.99988 curveto
+226.73242 444.05262 226.88053 445.09623 227.17676 446.13074 curveto
+227.47754 447.16069 227.93099 448.19747 228.53711 449.24109 curveto
+227.44336 449.24109 lineto
+226.75976 448.17012 226.24707 447.11739 225.90527 446.08289 curveto
+225.56803 445.04838 225.39941 444.02071 225.39941 442.99988 curveto
+225.39941 441.98361 225.56803 440.9605 225.90527 439.93054 curveto
+226.24251 438.9006 226.75521 437.84787 227.44336 436.77234 curveto
+228.53711 436.77234 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+235.3457 441.89246 moveto
+236.0065 442.03374 236.52148 442.32768 236.89062 442.77429 curveto
+237.26432 443.22091 237.45116 443.77234 237.45117 444.42859 curveto
+237.45116 445.43575 237.10481 446.21505 236.41211 446.76648 curveto
+235.71939 447.31791 234.73502 447.59363 233.45898 447.59363 curveto
+233.0306 447.59363 232.58854 447.55033 232.13281 447.46375 curveto
+231.68164 447.38171 231.21452 447.25639 230.73145 447.08777 curveto
+230.73145 445.75476 lineto
+231.11426 445.97807 231.53353 446.14669 231.98926 446.26062 curveto
+232.44498 446.37455 232.92122 446.43152 233.41797 446.43152 curveto
+234.28385 446.43152 234.94238 446.26062 235.39355 445.91882 curveto
+235.84928 445.57703 236.07714 445.08028 236.07715 444.42859 curveto
+236.07714 443.82703 235.86523 443.35763 235.44141 443.02039 curveto
+235.02213 442.67859 234.43652 442.5077 233.68457 442.50769 curveto
+232.49512 442.50769 lineto
+232.49512 441.37292 lineto
+233.73926 441.37292 lineto
+234.41829 441.37293 234.93782 441.23849 235.29785 440.9696 curveto
+235.65787 440.69617 235.83788 440.30425 235.83789 439.79382 curveto
+235.83788 439.26974 235.65104 438.8687 235.27734 438.5907 curveto
+234.9082 438.30816 234.37727 438.16688 233.68457 438.16687 curveto
+233.30631 438.16688 232.90071 438.20789 232.46777 438.28992 curveto
+232.03483 438.37196 231.55859 438.49956 231.03906 438.67273 curveto
+231.03906 437.44226 lineto
+231.56315 437.29644 232.05306 437.18706 232.50879 437.11414 curveto
+232.96907 437.04123 233.40201 437.00477 233.80762 437.00476 curveto
+234.85579 437.00477 235.68522 437.24403 236.2959 437.72253 curveto
+236.90657 438.1965 237.21191 438.83908 237.21191 439.65027 curveto
+237.21191 440.21538 237.05012 440.6939 236.72656 441.08582 curveto
+236.40299 441.47319 235.9427 441.74207 235.3457 441.89246 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+240.21973 445.65906 moveto
+241.66211 445.65906 lineto
+241.66211 446.83484 lineto
+240.54102 449.02234 lineto
+239.65918 449.02234 lineto
+240.21973 446.83484 lineto
+240.21973 445.65906 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+244.77246 446.23328 moveto
+247.02832 446.23328 lineto
+247.02832 438.44714 lineto
+244.57422 438.93933 lineto
+244.57422 437.68152 lineto
+247.01465 437.18933 lineto
+248.39551 437.18933 lineto
+248.39551 446.23328 lineto
+250.65137 446.23328 lineto
+250.65137 447.39539 lineto
+244.77246 447.39539 lineto
+244.77246 446.23328 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+253.59082 445.65906 moveto
+255.0332 445.65906 lineto
+255.0332 446.83484 lineto
+253.91211 449.02234 lineto
+253.03027 449.02234 lineto
+253.59082 446.83484 lineto
+253.59082 445.65906 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+260.69336 440.62097 moveto
+260.01888 440.62098 259.48567 440.8853 259.09375 441.41394 curveto
+258.70182 441.93803 258.50586 442.65809 258.50586 443.5741 curveto
+258.50586 444.49012 258.69954 445.21245 259.08691 445.74109 curveto
+259.47884 446.26518 260.01432 446.52722 260.69336 446.52722 curveto
+261.36328 446.52722 261.8942 446.2629 262.28613 445.73425 curveto
+262.67805 445.20561 262.87402 444.48556 262.87402 443.5741 curveto
+262.87402 442.6672 262.67805 441.94943 262.28613 441.42078 curveto
+261.8942 440.88758 261.36328 440.62098 260.69336 440.62097 curveto
+260.69336 439.55457 moveto
+261.7871 439.55457 262.64615 439.91004 263.27051 440.62097 curveto
+263.89485 441.33192 264.20702 442.31629 264.20703 443.5741 curveto
+264.20702 444.82735 263.89485 445.81173 263.27051 446.52722 curveto
+262.64615 447.23816 261.7871 447.59363 260.69336 447.59363 curveto
+259.59505 447.59363 258.73372 447.23816 258.10938 446.52722 curveto
+257.48958 445.81173 257.17969 444.82735 257.17969 443.5741 curveto
+257.17969 442.31629 257.48958 441.33192 258.10938 440.62097 curveto
+258.73372 439.91004 259.59505 439.55457 260.69336 439.55457 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+266.08691 436.77234 moveto
+267.18066 436.77234 lineto
+267.86425 437.84787 268.37467 438.9006 268.71191 439.93054 curveto
+269.05371 440.9605 269.22461 441.98361 269.22461 442.99988 curveto
+269.22461 444.02071 269.05371 445.04838 268.71191 446.08289 curveto
+268.37467 447.11739 267.86425 448.17012 267.18066 449.24109 curveto
+266.08691 449.24109 lineto
+266.69303 448.19747 267.1442 447.16069 267.44043 446.13074 curveto
+267.74121 445.09623 267.8916 444.05262 267.8916 442.99988 curveto
+267.8916 441.94715 267.74121 440.90809 267.44043 439.88269 curveto
+267.1442 438.85731 266.69303 437.82053 266.08691 436.77234 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+275.5498 436.77234 moveto
+274.93912 437.82053 274.48567 438.85731 274.18945 439.88269 curveto
+273.89323 440.90809 273.74511 441.94715 273.74512 442.99988 curveto
+273.74511 444.05262 273.89323 445.09623 274.18945 446.13074 curveto
+274.49023 447.16069 274.94368 448.19747 275.5498 449.24109 curveto
+274.45605 449.24109 lineto
+273.77246 448.17012 273.25976 447.11739 272.91797 446.08289 curveto
+272.58073 445.04838 272.41211 444.02071 272.41211 442.99988 curveto
+272.41211 441.98361 272.58073 440.9605 272.91797 439.93054 curveto
+273.25521 438.9006 273.7679 437.84787 274.45605 436.77234 curveto
+275.5498 436.77234 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+281.96875 438.39246 moveto
+278.48242 443.8407 lineto
+281.96875 443.8407 lineto
+281.96875 438.39246 lineto
+281.60645 437.18933 moveto
+283.34277 437.18933 lineto
+283.34277 443.8407 lineto
+284.79883 443.8407 lineto
+284.79883 444.98914 lineto
+283.34277 444.98914 lineto
+283.34277 447.39539 lineto
+281.96875 447.39539 lineto
+281.96875 444.98914 lineto
+277.36133 444.98914 lineto
+277.36133 443.65613 lineto
+281.60645 437.18933 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+287.23242 445.65906 moveto
+288.6748 445.65906 lineto
+288.6748 446.83484 lineto
+287.55371 449.02234 lineto
+286.67188 449.02234 lineto
+287.23242 446.83484 lineto
+287.23242 445.65906 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+292.73535 446.23328 moveto
+297.55469 446.23328 lineto
+297.55469 447.39539 lineto
+291.07422 447.39539 lineto
+291.07422 446.23328 lineto
+291.59831 445.69096 292.31152 444.96407 293.21387 444.05261 curveto
+294.12076 443.1366 294.69043 442.54643 294.92285 442.2821 curveto
+295.3649 441.78537 295.67252 441.36609 295.8457 441.02429 curveto
+296.02343 440.67794 296.1123 440.33843 296.1123 440.00574 curveto
+296.1123 439.46343 295.92089 439.02137 295.53809 438.67957 curveto
+295.15983 438.33778 294.66536 438.16688 294.05469 438.16687 curveto
+293.62174 438.16688 293.16373 438.24207 292.68066 438.39246 curveto
+292.20215 438.54286 291.68945 438.77072 291.14258 439.07605 curveto
+291.14258 437.68152 lineto
+291.69857 437.45822 292.2181 437.2896 292.70117 437.17566 curveto
+293.18424 437.06174 293.6263 437.00477 294.02734 437.00476 curveto
+295.08463 437.00477 295.92773 437.26909 296.55664 437.79773 curveto
+297.18554 438.32638 297.49999 439.03276 297.5 439.91687 curveto
+297.49999 440.33615 297.42024 440.73491 297.26074 441.11316 curveto
+297.10579 441.48686 296.82096 441.92892 296.40625 442.43933 curveto
+296.29231 442.5715 295.93001 442.95431 295.31934 443.58777 curveto
+294.70865 444.21668 293.84733 445.09851 292.73535 446.23328 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+300.60352 445.65906 moveto
+302.0459 445.65906 lineto
+302.0459 446.83484 lineto
+300.9248 449.02234 lineto
+300.04297 449.02234 lineto
+300.60352 446.83484 lineto
+300.60352 445.65906 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+307.70605 440.62097 moveto
+307.03157 440.62098 306.49837 440.8853 306.10645 441.41394 curveto
+305.71452 441.93803 305.51855 442.65809 305.51855 443.5741 curveto
+305.51855 444.49012 305.71224 445.21245 306.09961 445.74109 curveto
+306.49153 446.26518 307.02701 446.52722 307.70605 446.52722 curveto
+308.37597 446.52722 308.9069 446.2629 309.29883 445.73425 curveto
+309.69075 445.20561 309.88671 444.48556 309.88672 443.5741 curveto
+309.88671 442.6672 309.69075 441.94943 309.29883 441.42078 curveto
+308.9069 440.88758 308.37597 440.62098 307.70605 440.62097 curveto
+307.70605 439.55457 moveto
+308.7998 439.55457 309.65885 439.91004 310.2832 440.62097 curveto
+310.90754 441.33192 311.21972 442.31629 311.21973 443.5741 curveto
+311.21972 444.82735 310.90754 445.81173 310.2832 446.52722 curveto
+309.65885 447.23816 308.7998 447.59363 307.70605 447.59363 curveto
+306.60774 447.59363 305.74642 447.23816 305.12207 446.52722 curveto
+304.50228 445.81173 304.19238 444.82735 304.19238 443.5741 curveto
+304.19238 442.31629 304.50228 441.33192 305.12207 440.62097 curveto
+305.74642 439.91004 306.60774 439.55457 307.70605 439.55457 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+313.09961 436.77234 moveto
+314.19336 436.77234 lineto
+314.87695 437.84787 315.38737 438.9006 315.72461 439.93054 curveto
+316.0664 440.9605 316.2373 441.98361 316.2373 442.99988 curveto
+316.2373 444.02071 316.0664 445.04838 315.72461 446.08289 curveto
+315.38737 447.11739 314.87695 448.17012 314.19336 449.24109 curveto
+313.09961 449.24109 lineto
+313.70573 448.19747 314.1569 447.16069 314.45312 446.13074 curveto
+314.7539 445.09623 314.90429 444.05262 314.9043 442.99988 curveto
+314.90429 441.94715 314.7539 440.90809 314.45312 439.88269 curveto
+314.1569 438.85731 313.70573 437.82053 313.09961 436.77234 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+181.16895 456.3075 moveto
+180.55826 457.35568 180.10481 458.39246 179.80859 459.41785 curveto
+179.51237 460.44324 179.36426 461.48231 179.36426 462.53503 curveto
+179.36426 463.58777 179.51237 464.63139 179.80859 465.66589 curveto
+180.10937 466.69584 180.56282 467.73262 181.16895 468.77625 curveto
+180.0752 468.77625 lineto
+179.3916 467.70528 178.8789 466.65255 178.53711 465.61804 curveto
+178.19987 464.58354 178.03125 463.55587 178.03125 462.53503 curveto
+178.03125 461.51876 178.19987 460.49565 178.53711 459.4657 curveto
+178.87435 458.43576 179.38704 457.38303 180.0752 456.3075 curveto
+181.16895 456.3075 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+183.80762 456.72449 moveto
+189.22852 456.72449 lineto
+189.22852 457.8866 lineto
+185.07227 457.8866 lineto
+185.07227 460.38855 lineto
+185.27278 460.3202 185.4733 460.27007 185.67383 460.23816 curveto
+185.87435 460.20171 186.07487 460.18348 186.27539 460.18347 curveto
+187.41471 460.18348 188.31705 460.49565 188.98242 461.12 curveto
+189.64778 461.74435 189.98046 462.58973 189.98047 463.65613 curveto
+189.98046 464.75444 189.63866 465.60893 188.95508 466.2196 curveto
+188.27148 466.82572 187.30761 467.12878 186.06348 467.12878 curveto
+185.63509 467.12878 185.19759 467.09233 184.75098 467.01941 curveto
+184.30892 466.94649 183.85091 466.83712 183.37695 466.69128 curveto
+183.37695 465.30359 lineto
+183.78711 465.5269 184.21094 465.69324 184.64844 465.80261 curveto
+185.08593 465.91199 185.5485 465.96668 186.03613 465.96667 curveto
+186.82454 465.96668 187.44889 465.75932 187.90918 465.3446 curveto
+188.36946 464.92989 188.5996 464.36707 188.59961 463.65613 curveto
+188.5996 462.94519 188.36946 462.38237 187.90918 461.96765 curveto
+187.44889 461.55294 186.82454 461.34559 186.03613 461.34558 curveto
+185.66699 461.34559 185.29785 461.3866 184.92871 461.46863 curveto
+184.56413 461.55066 184.19043 461.67827 183.80762 461.85144 curveto
+183.80762 456.72449 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+192.85156 465.19421 moveto
+194.29395 465.19421 lineto
+194.29395 466.37 lineto
+193.17285 468.5575 lineto
+192.29102 468.5575 lineto
+192.85156 466.37 lineto
+192.85156 465.19421 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+201.34863 461.42761 moveto
+202.00943 461.56889 202.52441 461.86284 202.89355 462.30945 curveto
+203.26725 462.75607 203.45409 463.3075 203.4541 463.96375 curveto
+203.45409 464.97091 203.10774 465.7502 202.41504 466.30164 curveto
+201.72232 466.85307 200.73795 467.12878 199.46191 467.12878 curveto
+199.03353 467.12878 198.59147 467.08549 198.13574 466.9989 curveto
+197.68457 466.91687 197.21745 466.79154 196.73438 466.62292 curveto
+196.73438 465.28992 lineto
+197.11719 465.51323 197.53646 465.68185 197.99219 465.79578 curveto
+198.44791 465.90971 198.92415 465.96668 199.4209 465.96667 curveto
+200.28678 465.96668 200.94531 465.79578 201.39648 465.45398 curveto
+201.85221 465.11218 202.08007 464.61544 202.08008 463.96375 curveto
+202.08007 463.36219 201.86816 462.89279 201.44434 462.55554 curveto
+201.02506 462.21375 200.43945 462.04285 199.6875 462.04285 curveto
+198.49805 462.04285 lineto
+198.49805 460.90808 lineto
+199.74219 460.90808 lineto
+200.42122 460.90809 200.94075 460.77365 201.30078 460.50476 curveto
+201.6608 460.23133 201.84081 459.8394 201.84082 459.32898 curveto
+201.84081 458.8049 201.65397 458.40386 201.28027 458.12585 curveto
+200.91113 457.84331 200.3802 457.70204 199.6875 457.70203 curveto
+199.30924 457.70204 198.90364 457.74305 198.4707 457.82507 curveto
+198.03776 457.90711 197.56152 458.03472 197.04199 458.20789 curveto
+197.04199 456.97742 lineto
+197.56608 456.83159 198.05599 456.72222 198.51172 456.64929 curveto
+198.972 456.57639 199.40494 456.53993 199.81055 456.53992 curveto
+200.85872 456.53993 201.68815 456.77918 202.29883 457.25769 curveto
+202.9095 457.73166 203.21484 458.37424 203.21484 459.18542 curveto
+203.21484 459.75054 203.05305 460.22905 202.72949 460.62097 curveto
+202.40592 461.00835 201.94563 461.27723 201.34863 461.42761 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+206.22266 465.19421 moveto
+207.66504 465.19421 lineto
+207.66504 466.37 lineto
+206.54395 468.5575 lineto
+205.66211 468.5575 lineto
+206.22266 466.37 lineto
+206.22266 465.19421 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+213.3252 460.15613 moveto
+212.65071 460.15613 212.11751 460.42046 211.72559 460.9491 curveto
+211.33366 461.47319 211.13769 462.19324 211.1377 463.10925 curveto
+211.13769 464.02527 211.33138 464.7476 211.71875 465.27625 curveto
+212.11067 465.80033 212.64616 466.06238 213.3252 466.06238 curveto
+213.99511 466.06238 214.52604 465.79806 214.91797 465.26941 curveto
+215.30989 464.74077 215.50585 464.02071 215.50586 463.10925 curveto
+215.50585 462.20236 215.30989 461.48458 214.91797 460.95593 curveto
+214.52604 460.42274 213.99511 460.15613 213.3252 460.15613 curveto
+213.3252 459.08972 moveto
+214.41894 459.08973 215.27799 459.4452 215.90234 460.15613 curveto
+216.52669 460.86707 216.83886 461.85145 216.83887 463.10925 curveto
+216.83886 464.36251 216.52669 465.34688 215.90234 466.06238 curveto
+215.27799 466.77332 214.41894 467.12878 213.3252 467.12878 curveto
+212.22688 467.12878 211.36556 466.77332 210.74121 466.06238 curveto
+210.12142 465.34688 209.81152 464.36251 209.81152 463.10925 curveto
+209.81152 461.85145 210.12142 460.86707 210.74121 460.15613 curveto
+211.36556 459.4452 212.22688 459.08973 213.3252 459.08972 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+218.71875 456.3075 moveto
+219.8125 456.3075 lineto
+220.49609 457.38303 221.00651 458.43576 221.34375 459.4657 curveto
+221.68554 460.49565 221.85644 461.51876 221.85645 462.53503 curveto
+221.85644 463.55587 221.68554 464.58354 221.34375 465.61804 curveto
+221.00651 466.65255 220.49609 467.70528 219.8125 468.77625 curveto
+218.71875 468.77625 lineto
+219.32487 467.73262 219.77604 466.69584 220.07227 465.66589 curveto
+220.37304 464.63139 220.52343 463.58777 220.52344 462.53503 curveto
+220.52343 461.48231 220.37304 460.44324 220.07227 459.41785 curveto
+219.77604 458.39246 219.32487 457.35568 218.71875 456.3075 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+228.20898 455.77429 moveto
+227.5983 456.82248 227.14485 457.85926 226.84863 458.88464 curveto
+226.55241 459.91004 226.40429 460.9491 226.4043 462.00183 curveto
+226.40429 463.05457 226.55241 464.09819 226.84863 465.13269 curveto
+227.14941 466.16264 227.60286 467.19942 228.20898 468.24304 curveto
+227.11523 468.24304 lineto
+226.43164 467.17208 225.91894 466.11934 225.57715 465.08484 curveto
+225.23991 464.05034 225.07129 463.02267 225.07129 462.00183 curveto
+225.07129 460.98556 225.23991 459.96245 225.57715 458.9325 curveto
+225.91439 457.90256 226.42708 456.84982 227.11523 455.77429 curveto
+228.20898 455.77429 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+233.95801 460.74402 moveto
+233.33821 460.74402 232.84603 460.95594 232.48145 461.37976 curveto
+232.12142 461.80359 231.9414 462.38465 231.94141 463.12292 curveto
+231.9414 463.85665 232.12142 464.43771 232.48145 464.86609 curveto
+232.84603 465.28992 233.33821 465.50183 233.95801 465.50183 curveto
+234.57779 465.50183 235.0677 465.28992 235.42773 464.86609 curveto
+235.79231 464.43771 235.9746 463.85665 235.97461 463.12292 curveto
+235.9746 462.38465 235.79231 461.80359 235.42773 461.37976 curveto
+235.0677 460.95594 234.57779 460.74402 233.95801 460.74402 curveto
+236.69922 456.41687 moveto
+236.69922 457.67468 lineto
+236.35286 457.51063 236.00195 457.3853 235.64648 457.29871 curveto
+235.29557 457.21213 234.94693 457.16883 234.60059 457.16882 curveto
+233.68912 457.16883 232.99186 457.47645 232.50879 458.09167 curveto
+232.03027 458.70692 231.75683 459.6366 231.68848 460.88074 curveto
+231.95735 460.48426 232.29459 460.1812 232.7002 459.97156 curveto
+233.10579 459.75737 233.5524 459.65028 234.04004 459.65027 curveto
+235.06542 459.65028 235.87434 459.96245 236.4668 460.58679 curveto
+237.06379 461.20659 237.3623 462.05197 237.3623 463.12292 curveto
+237.3623 464.1711 237.0524 465.01192 236.43262 465.64539 curveto
+235.81282 466.27885 234.98795 466.59558 233.95801 466.59558 curveto
+232.77767 466.59558 231.87532 466.14441 231.25098 465.24207 curveto
+230.62663 464.33517 230.31445 463.02267 230.31445 461.30457 curveto
+230.31445 459.69129 230.69726 458.40614 231.46289 457.4491 curveto
+232.22851 456.48752 233.25618 456.00672 234.5459 456.00671 curveto
+234.89225 456.00672 235.24088 456.0409 235.5918 456.10925 curveto
+235.94726 456.17762 236.3164 456.28016 236.69922 456.41687 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+239.8916 464.66101 moveto
+241.33398 464.66101 lineto
+241.33398 465.83679 lineto
+240.21289 468.02429 lineto
+239.33105 468.02429 lineto
+239.8916 465.83679 lineto
+239.8916 464.66101 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+247.99902 457.39441 moveto
+244.5127 462.84265 lineto
+247.99902 462.84265 lineto
+247.99902 457.39441 lineto
+247.63672 456.19128 moveto
+249.37305 456.19128 lineto
+249.37305 462.84265 lineto
+250.8291 462.84265 lineto
+250.8291 463.99109 lineto
+249.37305 463.99109 lineto
+249.37305 466.39734 lineto
+247.99902 466.39734 lineto
+247.99902 463.99109 lineto
+243.3916 463.99109 lineto
+243.3916 462.65808 lineto
+247.63672 456.19128 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+253.2627 464.66101 moveto
+254.70508 464.66101 lineto
+254.70508 465.83679 lineto
+253.58398 468.02429 lineto
+252.70215 468.02429 lineto
+253.2627 465.83679 lineto
+253.2627 464.66101 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+262.43652 462.48035 moveto
+262.43652 461.56889 262.24739 460.86251 261.86914 460.36121 curveto
+261.49544 459.85991 260.96907 459.60926 260.29004 459.60925 curveto
+259.61556 459.60926 259.08919 459.85991 258.71094 460.36121 curveto
+258.33724 460.86251 258.15039 461.56889 258.15039 462.48035 curveto
+258.15039 463.38725 258.33724 464.09135 258.71094 464.59265 curveto
+259.08919 465.09395 259.61556 465.34461 260.29004 465.3446 curveto
+260.96907 465.34461 261.49544 465.09395 261.86914 464.59265 curveto
+262.24739 464.09135 262.43652 463.38725 262.43652 462.48035 curveto
+263.69434 465.44714 moveto
+263.69433 466.75053 263.40494 467.71895 262.82617 468.35242 curveto
+262.24739 468.99044 261.361 469.30945 260.16699 469.30945 curveto
+259.72493 469.30945 259.30794 469.27527 258.91602 469.20691 curveto
+258.52409 469.1431 258.14355 469.04284 257.77441 468.90613 curveto
+257.77441 467.6825 lineto
+258.14355 467.88301 258.50814 468.03113 258.86816 468.12683 curveto
+259.22819 468.22253 259.59505 468.27038 259.96875 468.27039 curveto
+260.79362 468.27038 261.41113 468.05391 261.82129 467.62097 curveto
+262.23144 467.19259 262.43652 466.54317 262.43652 465.67273 curveto
+262.43652 465.05066 lineto
+262.17675 465.50183 261.84407 465.83907 261.43848 466.06238 curveto
+261.03287 466.28569 260.54752 466.39734 259.98242 466.39734 curveto
+259.04362 466.39734 258.28711 466.03959 257.71289 465.3241 curveto
+257.13867 464.6086 256.85156 463.66069 256.85156 462.48035 curveto
+256.85156 461.29546 257.13867 460.34526 257.71289 459.62976 curveto
+258.28711 458.91427 259.04362 458.55653 259.98242 458.55652 curveto
+260.54752 458.55653 261.03287 458.66818 261.43848 458.89148 curveto
+261.84407 459.11479 262.17675 459.45203 262.43652 459.9032 curveto
+262.43652 458.74109 lineto
+263.69434 458.74109 lineto
+263.69434 465.44714 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+266.08691 455.77429 moveto
+267.18066 455.77429 lineto
+267.86425 456.84982 268.37467 457.90256 268.71191 458.9325 curveto
+269.05371 459.96245 269.22461 460.98556 269.22461 462.00183 curveto
+269.22461 463.02267 269.05371 464.05034 268.71191 465.08484 curveto
+268.37467 466.11934 267.86425 467.17208 267.18066 468.24304 curveto
+266.08691 468.24304 lineto
+266.69303 467.19942 267.1442 466.16264 267.44043 465.13269 curveto
+267.74121 464.09819 267.8916 463.05457 267.8916 462.00183 curveto
+267.8916 460.9491 267.74121 459.91004 267.44043 458.88464 curveto
+267.1442 457.85926 266.69303 456.82248 266.08691 455.77429 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+275.57715 455.77429 moveto
+274.96647 456.82248 274.51302 457.85926 274.2168 458.88464 curveto
+273.92057 459.91004 273.77246 460.9491 273.77246 462.00183 curveto
+273.77246 463.05457 273.92057 464.09819 274.2168 465.13269 curveto
+274.51757 466.16264 274.97102 467.19942 275.57715 468.24304 curveto
+274.4834 468.24304 lineto
+273.7998 467.17208 273.28711 466.11934 272.94531 465.08484 curveto
+272.60807 464.05034 272.43945 463.02267 272.43945 462.00183 curveto
+272.43945 460.98556 272.60807 459.96245 272.94531 458.9325 curveto
+273.28255 457.90256 273.79524 456.84982 274.4834 455.77429 curveto
+275.57715 455.77429 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+281.15527 461.55066 moveto
+280.49902 461.55066 279.98177 461.72612 279.60352 462.07703 curveto
+279.22982 462.42794 279.04297 462.91101 279.04297 463.52625 curveto
+279.04297 464.14148 279.22982 464.62455 279.60352 464.97546 curveto
+279.98177 465.32638 280.49902 465.50183 281.15527 465.50183 curveto
+281.81152 465.50183 282.32877 465.32638 282.70703 464.97546 curveto
+283.08528 464.62 283.27441 464.13692 283.27441 463.52625 curveto
+283.27441 462.91101 283.08528 462.42794 282.70703 462.07703 curveto
+282.33333 461.72612 281.81608 461.55066 281.15527 461.55066 curveto
+279.77441 460.96277 moveto
+279.18196 460.81694 278.7194 460.54122 278.38672 460.13562 curveto
+278.05859 459.73003 277.89453 459.23556 277.89453 458.65222 curveto
+277.89453 457.83648 278.18392 457.19162 278.7627 456.71765 curveto
+279.34603 456.2437 280.14355 456.00672 281.15527 456.00671 curveto
+282.17154 456.00672 282.96907 456.2437 283.54785 456.71765 curveto
+284.12662 457.19162 284.41601 457.83648 284.41602 458.65222 curveto
+284.41601 459.23556 284.24967 459.73003 283.91699 460.13562 curveto
+283.58886 460.54122 283.13085 460.81694 282.54297 460.96277 curveto
+283.20833 461.11772 283.72558 461.42078 284.09473 461.87195 curveto
+284.46842 462.32312 284.65527 462.87456 284.65527 463.52625 curveto
+284.65527 464.51518 284.35221 465.27397 283.74609 465.80261 curveto
+283.14452 466.33126 282.28092 466.59558 281.15527 466.59558 curveto
+280.02962 466.59558 279.16373 466.33126 278.55762 465.80261 curveto
+277.95605 465.27397 277.65527 464.51518 277.65527 463.52625 curveto
+277.65527 462.87456 277.84212 462.32312 278.21582 461.87195 curveto
+278.58952 461.42078 279.10905 461.11772 279.77441 460.96277 curveto
+279.26855 458.7821 moveto
+279.26855 459.31076 279.43261 459.72319 279.76074 460.01941 curveto
+280.09342 460.31564 280.55826 460.46375 281.15527 460.46375 curveto
+281.74772 460.46375 282.21028 460.31564 282.54297 460.01941 curveto
+282.8802 459.72319 283.04882 459.31076 283.04883 458.7821 curveto
+283.04882 458.25347 282.8802 457.84103 282.54297 457.5448 curveto
+282.21028 457.24858 281.74772 457.10047 281.15527 457.10046 curveto
+280.55826 457.10047 280.09342 457.24858 279.76074 457.5448 curveto
+279.43261 457.84103 279.26855 458.25347 279.26855 458.7821 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+287.25977 464.66101 moveto
+288.70215 464.66101 lineto
+288.70215 465.83679 lineto
+287.58105 468.02429 lineto
+286.69922 468.02429 lineto
+287.25977 465.83679 lineto
+287.25977 464.66101 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+291.58691 456.19128 moveto
+297.00781 456.19128 lineto
+297.00781 457.35339 lineto
+292.85156 457.35339 lineto
+292.85156 459.85535 lineto
+293.05208 459.78699 293.2526 459.73686 293.45312 459.70496 curveto
+293.65364 459.6685 293.85416 459.65028 294.05469 459.65027 curveto
+295.19401 459.65028 296.09635 459.96245 296.76172 460.58679 curveto
+297.42708 461.21115 297.75976 462.05652 297.75977 463.12292 curveto
+297.75976 464.22123 297.41796 465.07573 296.73438 465.6864 curveto
+296.05078 466.29252 295.08691 466.59558 293.84277 466.59558 curveto
+293.41438 466.59558 292.97689 466.55912 292.53027 466.48621 curveto
+292.08821 466.41329 291.63021 466.30391 291.15625 466.15808 curveto
+291.15625 464.77039 lineto
+291.5664 464.99369 291.99023 465.16004 292.42773 465.26941 curveto
+292.86523 465.37879 293.3278 465.43347 293.81543 465.43347 curveto
+294.60384 465.43347 295.22818 465.22612 295.68848 464.8114 curveto
+296.14876 464.39669 296.3789 463.83386 296.37891 463.12292 curveto
+296.3789 462.41199 296.14876 461.84917 295.68848 461.43445 curveto
+295.22818 461.01974 294.60384 460.81238 293.81543 460.81238 curveto
+293.44629 460.81238 293.07715 460.8534 292.70801 460.93542 curveto
+292.34342 461.01746 291.96972 461.14507 291.58691 461.31824 curveto
+291.58691 456.19128 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+300.63086 464.66101 moveto
+302.07324 464.66101 lineto
+302.07324 465.83679 lineto
+300.95215 468.02429 lineto
+300.07031 468.02429 lineto
+300.63086 465.83679 lineto
+300.63086 464.66101 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+309.80469 462.48035 moveto
+309.80468 461.56889 309.61555 460.86251 309.2373 460.36121 curveto
+308.8636 459.85991 308.33723 459.60926 307.6582 459.60925 curveto
+306.98372 459.60926 306.45735 459.85991 306.0791 460.36121 curveto
+305.7054 460.86251 305.51855 461.56889 305.51855 462.48035 curveto
+305.51855 463.38725 305.7054 464.09135 306.0791 464.59265 curveto
+306.45735 465.09395 306.98372 465.34461 307.6582 465.3446 curveto
+308.33723 465.34461 308.8636 465.09395 309.2373 464.59265 curveto
+309.61555 464.09135 309.80468 463.38725 309.80469 462.48035 curveto
+311.0625 465.44714 moveto
+311.06249 466.75053 310.7731 467.71895 310.19434 468.35242 curveto
+309.61555 468.99044 308.72916 469.30945 307.53516 469.30945 curveto
+307.0931 469.30945 306.6761 469.27527 306.28418 469.20691 curveto
+305.89225 469.1431 305.51172 469.04284 305.14258 468.90613 curveto
+305.14258 467.6825 lineto
+305.51172 467.88301 305.8763 468.03113 306.23633 468.12683 curveto
+306.59635 468.22253 306.96321 468.27038 307.33691 468.27039 curveto
+308.16178 468.27038 308.77929 468.05391 309.18945 467.62097 curveto
+309.5996 467.19259 309.80468 466.54317 309.80469 465.67273 curveto
+309.80469 465.05066 lineto
+309.54492 465.50183 309.21223 465.83907 308.80664 466.06238 curveto
+308.40104 466.28569 307.91569 466.39734 307.35059 466.39734 curveto
+306.41178 466.39734 305.65527 466.03959 305.08105 465.3241 curveto
+304.50683 464.6086 304.21973 463.66069 304.21973 462.48035 curveto
+304.21973 461.29546 304.50683 460.34526 305.08105 459.62976 curveto
+305.65527 458.91427 306.41178 458.55653 307.35059 458.55652 curveto
+307.91569 458.55653 308.40104 458.66818 308.80664 458.89148 curveto
+309.21223 459.11479 309.54492 459.45203 309.80469 459.9032 curveto
+309.80469 458.74109 lineto
+311.0625 458.74109 lineto
+311.0625 465.44714 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+313.45508 455.77429 moveto
+314.54883 455.77429 lineto
+315.23242 456.84982 315.74284 457.90256 316.08008 458.9325 curveto
+316.42187 459.96245 316.59277 460.98556 316.59277 462.00183 curveto
+316.59277 463.02267 316.42187 464.05034 316.08008 465.08484 curveto
+315.74284 466.11934 315.23242 467.17208 314.54883 468.24304 curveto
+313.45508 468.24304 lineto
+314.0612 467.19942 314.51237 466.16264 314.80859 465.13269 curveto
+315.10937 464.09819 315.25976 463.05457 315.25977 462.00183 curveto
+315.25976 460.9491 315.10937 459.91004 314.80859 458.88464 curveto
+314.51237 457.85926 314.0612 456.82248 313.45508 455.77429 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+174.3125 475.84265 moveto
+173.70182 476.89084 173.24837 477.92762 172.95215 478.953 curveto
+172.65592 479.9784 172.50781 481.01746 172.50781 482.07019 curveto
+172.50781 483.12293 172.65592 484.16655 172.95215 485.20105 curveto
+173.25293 486.231 173.70638 487.26778 174.3125 488.3114 curveto
+173.21875 488.3114 lineto
+172.53515 487.24044 172.02246 486.1877 171.68066 485.1532 curveto
+171.34342 484.1187 171.1748 483.09103 171.1748 482.07019 curveto
+171.1748 481.05392 171.34342 480.03081 171.68066 479.00085 curveto
+172.0179 477.97092 172.5306 476.91818 173.21875 475.84265 curveto
+174.3125 475.84265 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+176.97852 486.25378 moveto
+176.97852 484.99597 lineto
+177.32487 485.16004 177.67578 485.28536 178.03125 485.37195 curveto
+178.38672 485.45854 178.73535 485.50183 179.07715 485.50183 curveto
+179.9886 485.50183 180.68359 485.19649 181.16211 484.58582 curveto
+181.64518 483.97058 181.92089 483.03862 181.98926 481.78992 curveto
+181.72493 482.18185 181.38997 482.48263 180.98438 482.69226 curveto
+180.57877 482.9019 180.12988 483.00672 179.6377 483.00671 curveto
+178.61686 483.00672 177.80794 482.6991 177.21094 482.08386 curveto
+176.61849 481.46408 176.32226 480.6187 176.32227 479.54773 curveto
+176.32226 478.49956 176.63216 477.65874 177.25195 477.02527 curveto
+177.87174 476.39182 178.69661 476.07508 179.72656 476.07507 curveto
+180.9069 476.07508 181.80696 476.52853 182.42676 477.43542 curveto
+183.0511 478.33778 183.36327 479.65028 183.36328 481.37292 curveto
+183.36327 482.98165 182.98046 484.26681 182.21484 485.22839 curveto
+181.45377 486.18543 180.42838 486.66394 179.13867 486.66394 curveto
+178.79231 486.66394 178.4414 486.62976 178.08594 486.5614 curveto
+177.73047 486.49304 177.36133 486.3905 176.97852 486.25378 curveto
+179.72656 481.92664 moveto
+180.34635 481.92664 180.83626 481.71473 181.19629 481.29089 curveto
+181.56087 480.86707 181.74316 480.28602 181.74316 479.54773 curveto
+181.74316 478.81401 181.56087 478.23524 181.19629 477.8114 curveto
+180.83626 477.38303 180.34635 477.16883 179.72656 477.16882 curveto
+179.10677 477.16883 178.61458 477.38303 178.25 477.8114 curveto
+177.88997 478.23524 177.70996 478.81401 177.70996 479.54773 curveto
+177.70996 480.28602 177.88997 480.86707 178.25 481.29089 curveto
+178.61458 481.71473 179.10677 481.92664 179.72656 481.92664 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+185.99512 484.72937 moveto
+187.4375 484.72937 lineto
+187.4375 485.90515 lineto
+186.31641 488.09265 lineto
+185.43457 488.09265 lineto
+185.99512 485.90515 lineto
+185.99512 484.72937 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+193.43262 480.81238 moveto
+192.81282 480.81238 192.32063 481.0243 191.95605 481.44812 curveto
+191.59603 481.87195 191.41601 482.45301 191.41602 483.19128 curveto
+191.41601 483.92501 191.59603 484.50606 191.95605 484.93445 curveto
+192.32063 485.35828 192.81282 485.57019 193.43262 485.57019 curveto
+194.0524 485.57019 194.54231 485.35828 194.90234 484.93445 curveto
+195.26692 484.50606 195.44921 483.92501 195.44922 483.19128 curveto
+195.44921 482.45301 195.26692 481.87195 194.90234 481.44812 curveto
+194.54231 481.0243 194.0524 480.81238 193.43262 480.81238 curveto
+196.17383 476.48523 moveto
+196.17383 477.74304 lineto
+195.82747 477.57899 195.47656 477.45366 195.12109 477.36707 curveto
+194.77018 477.28049 194.42154 477.23719 194.0752 477.23718 curveto
+193.16373 477.23719 192.46647 477.54481 191.9834 478.16003 curveto
+191.50488 478.77528 191.23144 479.70496 191.16309 480.9491 curveto
+191.43196 480.55262 191.7692 480.24956 192.1748 480.03992 curveto
+192.5804 479.82573 193.02701 479.71863 193.51465 479.71863 curveto
+194.54003 479.71863 195.34895 480.03081 195.94141 480.65515 curveto
+196.5384 481.27495 196.83691 482.12032 196.83691 483.19128 curveto
+196.83691 484.23946 196.52701 485.08028 195.90723 485.71375 curveto
+195.28743 486.34721 194.46256 486.66394 193.43262 486.66394 curveto
+192.25228 486.66394 191.34993 486.21277 190.72559 485.31042 curveto
+190.10124 484.40353 189.78906 483.09103 189.78906 481.37292 curveto
+189.78906 479.75965 190.17187 478.4745 190.9375 477.51746 curveto
+191.70312 476.55588 192.73079 476.07508 194.02051 476.07507 curveto
+194.36686 476.07508 194.71549 476.10926 195.06641 476.17761 curveto
+195.42187 476.24598 195.79101 476.34852 196.17383 476.48523 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+199.36621 484.72937 moveto
+200.80859 484.72937 lineto
+200.80859 485.90515 lineto
+199.6875 488.09265 lineto
+198.80566 488.09265 lineto
+199.36621 485.90515 lineto
+199.36621 484.72937 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+206.46875 479.69128 moveto
+205.79427 479.69129 205.26106 479.95561 204.86914 480.48425 curveto
+204.47721 481.00835 204.28125 481.7284 204.28125 482.64441 curveto
+204.28125 483.56043 204.47493 484.28276 204.8623 484.8114 curveto
+205.25423 485.33549 205.78971 485.59754 206.46875 485.59753 curveto
+207.13867 485.59754 207.66959 485.33321 208.06152 484.80457 curveto
+208.45344 484.27592 208.64941 483.55587 208.64941 482.64441 curveto
+208.64941 481.73751 208.45344 481.01974 208.06152 480.49109 curveto
+207.66959 479.95789 207.13867 479.69129 206.46875 479.69128 curveto
+206.46875 478.62488 moveto
+207.56249 478.62489 208.42154 478.98035 209.0459 479.69128 curveto
+209.67024 480.40223 209.98241 481.3866 209.98242 482.64441 curveto
+209.98241 483.89767 209.67024 484.88204 209.0459 485.59753 curveto
+208.42154 486.30847 207.56249 486.66394 206.46875 486.66394 curveto
+205.37044 486.66394 204.50911 486.30847 203.88477 485.59753 curveto
+203.26497 484.88204 202.95508 483.89767 202.95508 482.64441 curveto
+202.95508 481.3866 203.26497 480.40223 203.88477 479.69128 curveto
+204.50911 478.98035 205.37044 478.62489 206.46875 478.62488 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+211.8623 475.84265 moveto
+212.95605 475.84265 lineto
+213.63965 476.91818 214.15006 477.97092 214.4873 479.00085 curveto
+214.8291 480.03081 215 481.05392 215 482.07019 curveto
+215 483.09103 214.8291 484.1187 214.4873 485.1532 curveto
+214.15006 486.1877 213.63965 487.24044 212.95605 488.3114 curveto
+211.8623 488.3114 lineto
+212.46842 487.26778 212.91959 486.231 213.21582 485.20105 curveto
+213.5166 484.16655 213.66699 483.12293 213.66699 482.07019 curveto
+213.66699 481.01746 213.5166 479.9784 213.21582 478.953 curveto
+212.91959 477.92762 212.46842 476.89084 211.8623 475.84265 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+222.12305 476.34265 moveto
+221.51237 477.39084 221.05892 478.42762 220.7627 479.453 curveto
+220.46647 480.4784 220.31836 481.51746 220.31836 482.57019 curveto
+220.31836 483.62293 220.46647 484.66655 220.7627 485.70105 curveto
+221.06347 486.731 221.51692 487.76778 222.12305 488.8114 curveto
+221.0293 488.8114 lineto
+220.3457 487.74044 219.83301 486.6877 219.49121 485.6532 curveto
+219.15397 484.6187 218.98535 483.59103 218.98535 482.57019 curveto
+218.98535 481.55392 219.15397 480.53081 219.49121 479.50085 curveto
+219.82845 478.47092 220.34114 477.41818 221.0293 476.34265 curveto
+222.12305 476.34265 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+224.9873 485.80359 moveto
+227.24316 485.80359 lineto
+227.24316 478.01746 lineto
+224.78906 478.50964 lineto
+224.78906 477.25183 lineto
+227.22949 476.75964 lineto
+228.61035 476.75964 lineto
+228.61035 485.80359 lineto
+230.86621 485.80359 lineto
+230.86621 486.9657 lineto
+224.9873 486.9657 lineto
+224.9873 485.80359 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+236.61523 477.66882 moveto
+235.90429 477.66883 235.36881 478.01974 235.00879 478.72156 curveto
+234.65332 479.41883 234.47558 480.46929 234.47559 481.87292 curveto
+234.47558 483.27202 234.65332 484.32247 235.00879 485.02429 curveto
+235.36881 485.72156 235.90429 486.07019 236.61523 486.07019 curveto
+237.33072 486.07019 237.86621 485.72156 238.22168 485.02429 curveto
+238.5817 484.32247 238.76171 483.27202 238.76172 481.87292 curveto
+238.76171 480.46929 238.5817 479.41883 238.22168 478.72156 curveto
+237.86621 478.01974 237.33072 477.66883 236.61523 477.66882 curveto
+236.61523 476.57507 moveto
+237.75911 476.57508 238.63183 477.02853 239.2334 477.93542 curveto
+239.83951 478.83778 240.14257 480.15028 240.14258 481.87292 curveto
+240.14257 483.59103 239.83951 484.90353 239.2334 485.81042 curveto
+238.63183 486.71277 237.75911 487.16394 236.61523 487.16394 curveto
+235.47135 487.16394 234.59635 486.71277 233.99023 485.81042 curveto
+233.38867 484.90353 233.08789 483.59103 233.08789 481.87292 curveto
+233.08789 480.15028 233.38867 478.83778 233.99023 477.93542 curveto
+234.59635 477.02853 235.47135 476.57508 236.61523 476.57507 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+242.71973 485.22937 moveto
+244.16211 485.22937 lineto
+244.16211 486.40515 lineto
+243.04102 488.59265 lineto
+242.15918 488.59265 lineto
+242.71973 486.40515 lineto
+242.71973 485.22937 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+246.68457 476.75964 moveto
+253.24707 476.75964 lineto
+253.24707 477.34753 lineto
+249.54199 486.9657 lineto
+248.09961 486.9657 lineto
+251.58594 477.92175 lineto
+246.68457 477.92175 lineto
+246.68457 476.75964 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+256.09082 485.22937 moveto
+257.5332 485.22937 lineto
+257.5332 486.40515 lineto
+256.41211 488.59265 lineto
+255.53027 488.59265 lineto
+256.09082 486.40515 lineto
+256.09082 485.22937 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+263.19336 480.19128 moveto
+262.51888 480.19129 261.98567 480.45561 261.59375 480.98425 curveto
+261.20182 481.50835 261.00586 482.2284 261.00586 483.14441 curveto
+261.00586 484.06043 261.19954 484.78276 261.58691 485.3114 curveto
+261.97884 485.83549 262.51432 486.09754 263.19336 486.09753 curveto
+263.86328 486.09754 264.3942 485.83321 264.78613 485.30457 curveto
+265.17805 484.77592 265.37402 484.05587 265.37402 483.14441 curveto
+265.37402 482.23751 265.17805 481.51974 264.78613 480.99109 curveto
+264.3942 480.45789 263.86328 480.19129 263.19336 480.19128 curveto
+263.19336 479.12488 moveto
+264.2871 479.12489 265.14615 479.48035 265.77051 480.19128 curveto
+266.39485 480.90223 266.70702 481.8866 266.70703 483.14441 curveto
+266.70702 484.39767 266.39485 485.38204 265.77051 486.09753 curveto
+265.14615 486.80847 264.2871 487.16394 263.19336 487.16394 curveto
+262.09505 487.16394 261.23372 486.80847 260.60938 486.09753 curveto
+259.98958 485.38204 259.67969 484.39767 259.67969 483.14441 curveto
+259.67969 481.8866 259.98958 480.90223 260.60938 480.19128 curveto
+261.23372 479.48035 262.09505 479.12489 263.19336 479.12488 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+268.58691 476.34265 moveto
+269.68066 476.34265 lineto
+270.36425 477.41818 270.87467 478.47092 271.21191 479.50085 curveto
+271.55371 480.53081 271.72461 481.55392 271.72461 482.57019 curveto
+271.72461 483.59103 271.55371 484.6187 271.21191 485.6532 curveto
+270.87467 486.6877 270.36425 487.74044 269.68066 488.8114 curveto
+268.58691 488.8114 lineto
+269.19303 487.76778 269.6442 486.731 269.94043 485.70105 curveto
+270.24121 484.66655 270.3916 483.62293 270.3916 482.57019 curveto
+270.3916 481.51746 270.24121 480.4784 269.94043 479.453 curveto
+269.6442 478.42762 269.19303 477.39084 268.58691 476.34265 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+276.49121 476.34265 moveto
+275.88053 477.39084 275.42708 478.42762 275.13086 479.453 curveto
+274.83463 480.4784 274.68652 481.51746 274.68652 482.57019 curveto
+274.68652 483.62293 274.83463 484.66655 275.13086 485.70105 curveto
+275.43164 486.731 275.88509 487.76778 276.49121 488.8114 curveto
+275.39746 488.8114 lineto
+274.71386 487.74044 274.20117 486.6877 273.85938 485.6532 curveto
+273.52213 484.6187 273.35351 483.59103 273.35352 482.57019 curveto
+273.35351 481.55392 273.52213 480.53081 273.85938 479.50085 curveto
+274.19661 478.47092 274.70931 477.41818 275.39746 476.34265 curveto
+276.49121 476.34265 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+279.35547 485.80359 moveto
+281.61133 485.80359 lineto
+281.61133 478.01746 lineto
+279.15723 478.50964 lineto
+279.15723 477.25183 lineto
+281.59766 476.75964 lineto
+282.97852 476.75964 lineto
+282.97852 485.80359 lineto
+285.23438 485.80359 lineto
+285.23438 486.9657 lineto
+279.35547 486.9657 lineto
+279.35547 485.80359 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+288.26953 485.80359 moveto
+290.52539 485.80359 lineto
+290.52539 478.01746 lineto
+288.07129 478.50964 lineto
+288.07129 477.25183 lineto
+290.51172 476.75964 lineto
+291.89258 476.75964 lineto
+291.89258 485.80359 lineto
+294.14844 485.80359 lineto
+294.14844 486.9657 lineto
+288.26953 486.9657 lineto
+288.26953 485.80359 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+297.08789 485.22937 moveto
+298.53027 485.22937 lineto
+298.53027 486.40515 lineto
+297.40918 488.59265 lineto
+296.52734 488.59265 lineto
+297.08789 486.40515 lineto
+297.08789 485.22937 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+304.35449 482.11902 moveto
+303.69824 482.11902 303.18099 482.29448 302.80273 482.64539 curveto
+302.42903 482.9963 302.24219 483.47937 302.24219 484.0946 curveto
+302.24219 484.70984 302.42903 485.19291 302.80273 485.54382 curveto
+303.18099 485.89474 303.69824 486.07019 304.35449 486.07019 curveto
+305.01074 486.07019 305.52799 485.89474 305.90625 485.54382 curveto
+306.2845 485.18836 306.47363 484.70528 306.47363 484.0946 curveto
+306.47363 483.47937 306.2845 482.9963 305.90625 482.64539 curveto
+305.53255 482.29448 305.01529 482.11902 304.35449 482.11902 curveto
+302.97363 481.53113 moveto
+302.38118 481.3853 301.91862 481.10958 301.58594 480.70398 curveto
+301.25781 480.29839 301.09375 479.80392 301.09375 479.22058 curveto
+301.09375 478.40483 301.38314 477.75998 301.96191 477.28601 curveto
+302.54524 476.81206 303.34277 476.57508 304.35449 476.57507 curveto
+305.37076 476.57508 306.16829 476.81206 306.74707 477.28601 curveto
+307.32584 477.75998 307.61523 478.40483 307.61523 479.22058 curveto
+307.61523 479.80392 307.44889 480.29839 307.11621 480.70398 curveto
+306.78808 481.10958 306.33007 481.3853 305.74219 481.53113 curveto
+306.40755 481.68608 306.9248 481.98914 307.29395 482.44031 curveto
+307.66764 482.89148 307.85448 483.44292 307.85449 484.0946 curveto
+307.85448 485.08354 307.55142 485.84233 306.94531 486.37097 curveto
+306.34374 486.89962 305.48014 487.16394 304.35449 487.16394 curveto
+303.22884 487.16394 302.36295 486.89962 301.75684 486.37097 curveto
+301.15527 485.84233 300.85449 485.08354 300.85449 484.0946 curveto
+300.85449 483.44292 301.04134 482.89148 301.41504 482.44031 curveto
+301.78874 481.98914 302.30827 481.68608 302.97363 481.53113 curveto
+302.46777 479.35046 moveto
+302.46777 479.87912 302.63183 480.29155 302.95996 480.58777 curveto
+303.29264 480.884 303.75748 481.03211 304.35449 481.0321 curveto
+304.94694 481.03211 305.4095 480.884 305.74219 480.58777 curveto
+306.07942 480.29155 306.24804 479.87912 306.24805 479.35046 curveto
+306.24804 478.82183 306.07942 478.40939 305.74219 478.11316 curveto
+305.4095 477.81694 304.94694 477.66883 304.35449 477.66882 curveto
+303.75748 477.66883 303.29264 477.81694 302.95996 478.11316 curveto
+302.63183 478.40939 302.46777 478.82183 302.46777 479.35046 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+310.45898 485.22937 moveto
+311.90137 485.22937 lineto
+311.90137 486.40515 lineto
+310.78027 488.59265 lineto
+309.89844 488.59265 lineto
+310.45898 486.40515 lineto
+310.45898 485.22937 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+317.56152 480.19128 moveto
+316.88704 480.19129 316.35384 480.45561 315.96191 480.98425 curveto
+315.56998 481.50835 315.37402 482.2284 315.37402 483.14441 curveto
+315.37402 484.06043 315.56771 484.78276 315.95508 485.3114 curveto
+316.347 485.83549 316.88248 486.09754 317.56152 486.09753 curveto
+318.23144 486.09754 318.76236 485.83321 319.1543 485.30457 curveto
+319.54622 484.77592 319.74218 484.05587 319.74219 483.14441 curveto
+319.74218 482.23751 319.54622 481.51974 319.1543 480.99109 curveto
+318.76236 480.45789 318.23144 480.19129 317.56152 480.19128 curveto
+317.56152 479.12488 moveto
+318.65527 479.12489 319.51432 479.48035 320.13867 480.19128 curveto
+320.76301 480.90223 321.07519 481.8866 321.0752 483.14441 curveto
+321.07519 484.39767 320.76301 485.38204 320.13867 486.09753 curveto
+319.51432 486.80847 318.65527 487.16394 317.56152 487.16394 curveto
+316.46321 487.16394 315.60189 486.80847 314.97754 486.09753 curveto
+314.35775 485.38204 314.04785 484.39767 314.04785 483.14441 curveto
+314.04785 481.8866 314.35775 480.90223 314.97754 480.19128 curveto
+315.60189 479.48035 316.46321 479.12489 317.56152 479.12488 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+322.95508 476.34265 moveto
+324.04883 476.34265 lineto
+324.73242 477.41818 325.24284 478.47092 325.58008 479.50085 curveto
+325.92187 480.53081 326.09277 481.55392 326.09277 482.57019 curveto
+326.09277 483.59103 325.92187 484.6187 325.58008 485.6532 curveto
+325.24284 486.6877 324.73242 487.74044 324.04883 488.8114 curveto
+322.95508 488.8114 lineto
+323.5612 487.76778 324.01237 486.731 324.30859 485.70105 curveto
+324.60937 484.66655 324.75976 483.62293 324.75977 482.57019 curveto
+324.75976 481.51746 324.60937 480.4784 324.30859 479.453 curveto
+324.01237 478.42762 323.5612 477.39084 322.95508 476.34265 curveto
+fill
+grestore
+gsave [1.436848 0 0 0.806571 118.9493 72.56034] concat
+0 0 0 setrgbcolor
+[] 0 setdash
+1.5 setlinewidth
+0 setlinejoin
+0 setlinecap
+newpath
+154.5 482.61218 moveto
+154.5 515.59418 125.716 542.36218 90.25 542.36218 curveto
+54.784 542.36218 26 515.59418 26 482.61218 curveto
+26 449.63018 54.784 422.86218 90.25 422.86218 curveto
+125.716 422.86218 154.5 449.63018 154.5 482.61218 curveto
+closepath
+stroke
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+244.37402 420.15613 moveto
+245.75488 420.15613 lineto
+245.75488 430.36218 lineto
+244.37402 430.36218 lineto
+244.37402 420.15613 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+249.69238 420.5321 moveto
+249.69238 422.70593 lineto
+252.2832 422.70593 lineto
+252.2832 423.68347 lineto
+249.69238 423.68347 lineto
+249.69238 427.83972 lineto
+249.69238 428.46407 249.77669 428.86511 249.94531 429.04285 curveto
+250.11849 429.22058 250.46712 429.30945 250.99121 429.30945 curveto
+252.2832 429.30945 lineto
+252.2832 430.36218 lineto
+250.99121 430.36218 lineto
+250.0205 430.36218 249.35058 430.18217 248.98145 429.82214 curveto
+248.6123 429.45756 248.42773 428.79675 248.42773 427.83972 curveto
+248.42773 423.68347 lineto
+247.50488 423.68347 lineto
+247.50488 422.70593 lineto
+248.42773 422.70593 lineto
+248.42773 420.5321 lineto
+249.69238 420.5321 lineto
+fill
+grestore
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+250.25488 525.79089 moveto
+250.25488 529.62585 lineto
+251.99121 529.62585 lineto
+252.63378 529.62586 253.13053 529.45952 253.48145 529.12683 curveto
+253.83235 528.79415 254.00781 528.3202 254.00781 527.70496 curveto
+254.00781 527.09429 253.83235 526.62261 253.48145 526.28992 curveto
+253.13053 525.95724 252.63378 525.7909 251.99121 525.79089 curveto
+250.25488 525.79089 lineto
+248.87402 524.65613 moveto
+251.99121 524.65613 lineto
+253.13509 524.65614 253.99869 524.9159 254.58203 525.43542 curveto
+255.16991 525.95041 255.46386 526.70692 255.46387 527.70496 curveto
+255.46386 528.71212 255.16991 529.47319 254.58203 529.98816 curveto
+253.99869 530.50314 253.13509 530.76062 251.99121 530.76062 curveto
+250.25488 530.76062 lineto
+250.25488 534.86218 lineto
+248.87402 534.86218 lineto
+248.87402 524.65613 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+257.68555 533.70007 moveto
+259.94141 533.70007 lineto
+259.94141 525.91394 lineto
+257.4873 526.40613 lineto
+257.4873 525.14832 lineto
+259.92773 524.65613 lineto
+261.30859 524.65613 lineto
+261.30859 533.70007 lineto
+263.56445 533.70007 lineto
+263.56445 534.86218 lineto
+257.68555 534.86218 lineto
+257.68555 533.70007 lineto
+fill
+grestore
+0 0 0 setrgbcolor
+[] 0 setdash
+1 setlinewidth
+0 setlinejoin
+0 setlinecap
+newpath
+155 302.36218 moveto
+154.5 534.36218 lineto
+stroke
+grestore
+showpage
+%%EOF
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#include <config.h>
+
+#include <dune/common/parallel/indexset.hh>
+#include <dune/common/parallel/plocalindex.hh>
+#include <dune/common/parallel/mpihelper.hh>
+#include <iostream>
+#include "buildindexset.hh"
+#include "reverse.hh"
+
+int main(int argc, char **argv)
+{
+  // This is a parallel program, so we need to
+  // initialize MPI first.
+ Dune::MPIHelper& helper = Dune::MPIHelper::instance(argc, argv);
+
+ // The rank of our process
+ int rank = helper.rank();
+
+ // The type used as the global index
+ typedef int GlobalIndex;
+
+ // The index set we use to identify the local indices with the globally
+ // unique ones
+ typedef Dune::ParallelIndexSet<GlobalIndex,LocalIndex,100> ParallelIndexSet;
+
+ // The index set
+ ParallelIndexSet indexSet;
+
+ build(helper, indexSet);
+
+ // Print the index set
+ std::cout<<indexSet<<std::endl;
+
+
+ reverseLocalIndex(indexSet);
+
+ // Print the index set
+ if(rank==0)
+    std::cout<<"Reordered local indices:"<<std::endl;
+
+ // Wait for all processes
+ helper.getCommunication().barrier();
+
+ std::cout<<indexSet<<std::endl;
+ // Assign new local indices
+
+ return 0;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include <iostream>
+#include <dune/common/exceptions.hh> // We use exceptions
+#include <dune/common/parallel/mpihelper.hh> // An initializer of MPI
+#include <dune/common/parallel/indexset.hh>
+#include <dune/common/parallel/plocalindex.hh>
+
+enum Flags { owner, ghost };
+
+struct Bla
+{
+
+ /** @brief The local index. */
+ size_t localIndex_;
+
+ /** @brief An attribute for the index. */
+ char attribute_;
+
+ /** @brief True if the index is also known to other processors. */
+ bool public_;
+
+ /**
+ * @brief The state of the index.
+ *
+ * Has to be one of LocalIndexState!
+ * @see LocalIndexState.
+ */
+ char state_;
+};
+
+
+template<typename T1, typename T2>
+void buildBlockedIndexSet(T1& indexset, int N, const T2& comm)
+{
+ int rank=comm.rank();
+ int size=comm.size();
+ int localsize=N/size;
+ int bigger=N%size;
+ int start, end;
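+  // Distribute the N global indices as evenly as possible over all processes:
+  // the first N%size ranks get one extra index each. [start, end) is the
+  // half-open range of global indices owned by this rank.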
+ if(rank<bigger) {
+ start=rank*(localsize+1);
+ end=start+(localsize+1);
+ }else{
+ start=bigger*(localsize+1)+(rank-bigger)*localsize;
+ end=start+localsize;
+ }
+
+ indexset.beginResize();
+ int index=0;
+ int gindex=start;
+ typedef typename T1::LocalIndex LocalIndex;
+
+ std::cout<<sizeof(LocalIndex)<<" "<<sizeof(Bla)<<std::endl;
+
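+  // add a ghost entry for the last index owned by the left neighbour (if any)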
+ if(start>0)
+ indexset.add(gindex-1,LocalIndex(index++,ghost));
+
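+  // add the indices owned by this process; they are marked public so that
+  // other processes may know about them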
+ for(int i=start; i<end; i++,index++,gindex++)
+ indexset.add(gindex,LocalIndex(index,owner,true));
+
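+  // add a ghost entry for the first index owned by the right neighbour (if any)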
+ if(end<N)
+ indexset.add(gindex,LocalIndex(index,ghost,true));
+}
+int main(int argc, char** argv)
+{
+ int n=100;
+ try{
+ using namespace Dune;
+
+    // Initialize MPI if available, otherwise a sequential fake helper is used
+ MPIHelper& helper = MPIHelper::instance(argc, argv);
+ std::cout << "Hello World! This is poosc08." << std::endl;
+ if(Dune::MPIHelper::isFake)
+ std::cout<< "This is a sequential program." << std::endl;
+ else{
+ typedef ParallelIndexSet<int,ParallelLocalIndex<Flags> > IndexSet;
+ IndexSet blockedSet;
+ buildBlockedIndexSet(blockedSet, n, helper.getCommunication());
+ }
+ return 0;
+ }
+ catch (Dune::Exception &e) {
+ std::cerr << "Dune reported error: " << e << std::endl;
+ }
+ catch (...) {
+ std::cerr << "Unknown exception thrown!" << std::endl;
+ }
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include <iostream>
+#include <dune/common/exceptions.hh> // We use exceptions
+#include <dune/common/parallel/mpihelper.hh> // An initializer of MPI
+#include <dune/common/parallel/indexset.hh>
+#include <dune/common/parallel/remoteindices.hh>
+#include <dune/common/parallel/communicator.hh>
+#include <dune/common/parallel/plocalindex.hh>
+#include <dune/common/parallel/interface.hh>
+#include <dune/common/enumset.hh>
+
+enum Flags { owner, ghost };
+
+template<typename T>
+struct AddData {
+ typedef typename T::value_type IndexedType;
+
+ static const IndexedType& gather(const T& v, int i){
+ return v[i];
+ }
+
+ static void scatter(T& v, const IndexedType& item, int i){
+ v[i]+=item;
+ }
+};
+
+template<typename T>
+struct CopyData {
+ typedef typename T::value_type IndexedType;
+
+ static const IndexedType& gather(const T& v, int i){
+ return v[i];
+ }
+
+ static void scatter(T& v, const IndexedType& item, int i){
+ v[i]=item;
+ }
+};
+
+
+template<class T>
+void doCalculations(T&){}
+
+#if HAVE_MPI
+void test()
+{
+ int rank;
+  MPI_Comm comm=MPI_COMM_WORLD;
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ using namespace Dune;
+ // shortcut for index set type
+ typedef ParallelLocalIndex<Flags> LocalIndex;
+ typedef ParallelIndexSet<int, LocalIndex > PIndexSet;
+ PIndexSet sis;
+ sis.beginResize();
+ if(rank==0) {
+
+ sis.add(11, LocalIndex(0, ghost));
+ for(int i=1; i<=6; i++)
+ sis.add(i-1, LocalIndex(i, owner, i<=1||i>5));
+ sis.add(6, LocalIndex(7, ghost));
+ }else{
+ sis.add(5, LocalIndex(0, ghost));
+ for(int i=1; i<=6; i++)
+ sis.add(5+i, LocalIndex(i, owner, i<=1||i>5));
+ sis.add(0,LocalIndex(7, ghost));
+ }
+ sis.endResize();
+
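+  // build the target (destination) index set of the redistribution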
+ PIndexSet tis;
+ tis.beginResize();
+ int l=0;
+ for(int i=0; i<2; ++i)
+ for(int j=0; j<5; ++j) {
+ int g=rank*3-1+i*6+j;
+ if(g<0||g>11)
+ continue;
+ Flags flag=(j>0&&j<4) ? owner : ghost;
+ tis.add(g, LocalIndex(l++, flag));
+ }
+ tis.endResize();
+  std::cout<<rank<<" indexset: "<<sis<<std::endl;
+
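+  // determine the remote index information for the redistribution from the
+  // source set sis to the target set tis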
+ RemoteIndices<PIndexSet> riRedist(sis, tis, comm);
+ riRedist.rebuild<true>();
+
+ std::vector<int> v;
+ RemoteIndices<PIndexSet> riS(sis,sis, comm, v, true);
+ riS.rebuild<false>();
+
+ std::cout<<std::endl<<"begin"<<rank<<" riS="<<riS<<" end"<<rank<<std::endl<<std::endl;
+
+ Combine<EnumItem<Flags,ghost>,EnumItem<Flags,owner>,Flags> ghostFlags;
+ EnumItem<Flags,owner> ownerFlags;
+ Combine<EnumItem<Flags,ghost>, EnumItem<Flags,owner> > allFlags;
+
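+  // build the communication interfaces from the remote index information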
+ Interface infRedist;
+ Interface infS;
+
+ infRedist.build(riRedist, ownerFlags, allFlags);
+ infS.build(riS, ownerFlags, ghostFlags);
+
+ std::cout<<"inf "<<rank<<": "<<infS<<std::endl;
+
+ typedef std::vector<double> Container;
+ Container s(sis.size(),3), t(tis.size());
+
+ s[sis.size()-1]=-1;
+
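+  // set up buffered communicators and exchange data along the interfaces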
+ BufferedCommunicator bComm;
+ BufferedCommunicator bCommRedist;
+ bComm.build(s, s, infS);
+ //bCommRedist.build(s, t, infRedist);
+ for(std::size_t i=0; i<sis.size(); i++)
+ std::cout<<s[i]<<" ";
+ std::cout<<std::endl;
+
+ bComm.forward<CopyData<Container> >(s,s);
+
+ for(std::size_t i=0; i<sis.size(); i++)
+ std::cout<<s[i]<<" ";
+ std::cout<<std::endl;
+ //bCommRedist.forward<CopyData<Container> >(s,t);
+ // calculate on the redistributed array
+ doCalculations(t);
+ bCommRedist.backward<AddData<Container> >(s,t);
+}
+#endif // HAVE_MPI
+
+int main(int argc, char** argv)
+{
+ try{
+ using namespace Dune;
+#if HAVE_MPI
+    // Initialize MPI
+ MPIHelper& helper = MPIHelper::instance(argc, argv);
+ std::cout << "Hello World! This is poosc08. rank=" <<helper.rank()<< std::endl;
+ test();
+ return 0;
+#else
+ std::cout<< "Test poosc08_test disabled because MPI is not available." << std::endl;
+ return 77;
+#endif // HAVE_MPI
+ }
+ catch (Dune::Exception &e) {
+ std::cerr << "Dune reported error: " << e << std::endl;
+ }
+ catch (...) {
+ std::cerr << "Unknown exception thrown!" << std::endl;
+ }
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef REVERSE_HH
+#define REVERSE_HH
+
+#include "buildindexset.hh"
+
+/**
+ * @brief Reverse the local indices of an index set.
+ *
+ * If the index set has N entries, index 0 will become N-1,
+ * index 1 will become N-2, ..., and index N-1 will become 0.
+ * @param indexSet The index set to reverse.
+ */
+template<typename TG, typename TL, int N>
+void reverseLocalIndex(Dune::ParallelIndexSet<TG,TL,N>& indexSet)
+{
+ // reverse the local indices
+ typedef typename Dune::ParallelIndexSet<TG,TL,N>::iterator iterator;
+
+ iterator end = indexSet.end();
+ size_t maxLocal = 0;
+
+ // find the maximal local index
+ for(iterator index = indexSet.begin(); index != end; ++index) {
+ // Get the local index
+ LocalIndex& local = index->local();
+ maxLocal = std::max(maxLocal, local.local());
+ }
+
+ for(iterator index = indexSet.begin(); index != end; ++index) {
+ // Get the local index
+ LocalIndex& local = index->local();
+ local = maxLocal--;
+ }
+
+}
+#endif
--- /dev/null
+# create Doxyfile.in and Doxyfile
+add_doxygen_target()
+
+install(
+ FILES
+ Doxystyle
+ doxygen-macros
+ DESTINATION
+ ${CMAKE_INSTALL_DATAROOTDIR}/dune-common/doc/doxygen
+ )
--- /dev/null
+# Where to search and which files to use
+INPUT += @srcdir@/mainpage.txt \
+ @srcdir@/modules.txt \
+ @top_srcdir@/dune/common/modules.txt \
+ @top_srcdir@/dune/common
+EXCLUDE += @top_srcdir@/dune/common/test \
+ @top_srcdir@/dune/common/debugallocator.cc
+
--- /dev/null
+#----------- Doxystyle -----------
+
+##################################################################################
+# Project Details:
+
+PROJECT_NAME = @DUNE_MOD_NAME@
+PROJECT_NUMBER = @DUNE_MOD_VERSION@
+
+##################################################################################
+# What to parse
+
+RECURSIVE = YES
+FILE_PATTERNS = *.hh \
+ *.cc
+INPUT =
+EXCLUDE =
+
+EXCLUDE_PATTERNS = */test/*
+
+EXCLUDE_SYMBOLS = Impl detail Imp Internal
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = NO # don't warn about missing stl-headers etc.
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH = @abs_top_srcdir@
+
+WARNINGS = YES
+WARN_IF_UNDOCUMENTED = YES
+WARN_IF_DOC_ERROR = YES
+WARN_NO_PARAMDOC = NO
+WARN_LOGFILE = doxyerr.log
+
+#################################################################################
+# Styling
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before file names in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = NO
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names, like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match function declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); vs.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = YES
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = YES
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = YES
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = YES
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+REFERENCED_BY_RELATION = NO
+REFERENCES_RELATION = NO
+ALPHABETICAL_INDEX = YES
+COLS_IN_ALPHA_INDEX = 3
+HTML_OUTPUT = html
+SEARCHENGINE = YES
+
+GENERATE_TODOLIST = YES
+GENERATE_TESTLIST = YES
+GENERATE_BUGLIST = YES
+GENERATE_DEPRECATEDLIST= YES
+
+GENERATE_HTML = YES
+GENERATE_DOCSET = NO
+GENERATE_HTMLHELP = NO
+GENERATE_CHI = NO
+GENERATE_QHP = NO
+GENERATE_TREEVIEW = NO
+
+GENERATE_LATEX = NO
+GENERATE_RTF = NO
+GENERATE_MAN = NO
+GENERATE_XML = NO
+GENERATE_AUTOGEN_DEF = NO
+GENERATE_PERLMOD = NO
+GENERATE_TAGFILE = @DUNE_MOD_NAME@.tag
+GENERATE_LEGEND = NO
+
+MACRO_EXPANSION = YES
+EXPAND_ONLY_PREDEF = YES
+
+
+@DOT_TRUE@HAVE_DOT = YES
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = YES
+CLASS_GRAPH = YES
+COLLABORATION_GRAPH = NO
+GROUP_GRAPHS = YES
+INCLUDE_GRAPH = NO
+INCLUDED_BY_GRAPH = NO
+GRAPHICAL_HIERARCHY = NO
+DOT_MULTI_TARGETS = NO
+GENERATE_LEGEND = NO
+DOT_CLEANUP = NO
+
+#####################################################################
+# Header Footer and Stylesheet in use is controlled by the Makefile #
+# (christi 16. Jan 2006) #
+#####################################################################
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+########################## DOXYGEN DOXYSTYLE
+EXTRACT_ALL = YES
+EXTRACT_PRIVATE = NO
+EXTRACT_STATIC = YES
+EXTRACT_LOCAL_CLASSES = YES
+EXTRACT_LOCAL_METHODS = NO
+EXTRACT_ANON_NSPACES = NO
+HIDE_UNDOC_MEMBERS = NO
+HIDE_UNDOC_CLASSES = NO
+HIDE_FRIEND_COMPOUNDS = YES
+HIDE_IN_BODY_DOCS = NO
+INTERNAL_DOCS = NO
+CASE_SENSE_NAMES = NO
+HIDE_SCOPE_NAMES = NO
+SHOW_INCLUDE_FILES = YES
+INLINE_INFO = YES
+SORT_MEMBER_DOCS = YES
+SORT_BRIEF_DOCS = NO
+SORT_MEMBERS_CTORS_1ST = NO
+SORT_GROUP_NAMES = NO
+SORT_BY_SCOPE_NAME = NO
+GENERATE_TODOLIST = YES
+GENERATE_TESTLIST = YES
+GENERATE_BUGLIST = YES
+GENERATE_DEPRECATEDLIST= YES
+ENABLED_SECTIONS =
+MAX_INITIALIZER_LINES = 30
+SHOW_FILES = YES
+SHOW_NAMESPACES = YES
+FILE_VERSION_FILTER =
+LAYOUT_FILE =
--- /dev/null
+# This file contains the list of predefined preprocessor macros required for running
+# Doxygen. It should be included automatically by the build system after
+# Doxystyle.
+#
+# The reason for keeping this separate is the website build, where we
+# would otherwise have to maintain these definitions a second time.
+
+PREDEFINED = DOXYGEN \
+ HAVE_MPI:=1 \
+ _DEBUG_ALLOCATOR_H:=1 \
+ "DUNE_DEPRECATED:=/** \deprecated */" \
+ "DUNE_DEPRECATED_MSG(A):=/** \deprecated A */" \
+ "DUNE_INLINE_VARIABLE:= " \
+ __cpp_inline_variables:=201606 \
+ __cpp_constexpr:=201603 \
+ __cpp_variable_templates:=201304 \
+
+
+# marker - here to allow the last line continuation
--- /dev/null
+/** \mainpage dune-common Automatic Documentation
+
+\section intro Introduction
+
+Welcome to the %Dune documentation pages. This documentation has been
+generated using Doxygen, a free source code documentation system for
+documenting C/C++ code.
+
+\section mods Modules
+
+The best way to start is from the page \subpage modules which gives
+you access to the documentation by category.
+
+*/
+
+/** \page modules Modules
+*/
--- /dev/null
+/**
+ @defgroup Common Common
+ @brief foundation classes
+*/
+
+/**
+ @defgroup Allocators Allocators
+ @brief Implementations of the STL allocator concept
+ @ingroup Common
+*/
+
+/**
+ @defgroup Utilities Utilities
+ @brief Collection of helper classes, type traits, etc.
+ @ingroup Common
+*/
+
+/**
+ @defgroup Path Filesystem Paths
+ @brief Utilities for filesystem path management
+ @ingroup Utilities
+*/
+
+/**
+ @defgroup RangeUtilities Range Utilities
+ @brief Utilities for reduction like operations on ranges
+
+ All these reduction operations work for appropriate ranges and scalar values
+
+ @ingroup Utilities
+*/
+
+/**
+ @defgroup StringUtilities String Utilities
+ @brief Utility functions for std::string
+ @ingroup Utilities
+*/
+
+/**
+ @defgroup TupleUtilities Tuple Utilities
+ @brief Utility classes which can be used with std::tuple
+ @ingroup Utilities
+*/
+
+/**
+ @defgroup TypeUtilities Type Utilities
+ @brief Type traits, overload helpers, and other utilities for type computations
+ @ingroup Utilities
+*/
+
+/**
+ @defgroup HybridUtilities Hybrid Utilities
+ @brief Hybrid utility functions that work on homogeneous as well as heterogeneous containers
+ @ingroup Utilities
+*/
+
+/**
+ @defgroup CxxUtilities C++ utilities and backports
+ @brief Standard library features backported from newer C++ versions or technical specifications and DUNE-specific utilities.
+ @ingroup Utilities
+*/
+
+/**
+ @defgroup CxxConcepts C++ concepts
+ @brief Concepts built on top of C++14.
+ @ingroup Utilities
+*/
+
+/**
+ @defgroup Numbers Numbers
+ @brief Classes implementing different number representations and helper functions
+ @ingroup Common
+*/
+
+/**
+ @defgroup FloatCmp FloatCmp
+ @ingroup Numbers
+*/
--- /dev/null
+.\" First parameter, NAME, should be all caps
+.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
+.\" other parameters are allowed: see man(7), man(1)
+.TH DUNECONTROL 1 "November 8, 2016"
+.\" Please adjust this date whenever revising the manpage.
+.\"
+.\" Some roff macros, for reference:
+.\" .nh disable hyphenation
+.\" .hy enable hyphenation
+.\" .ad l left justify
+.\" .ad b justify to both left and right margins
+.\" .nf disable filling
+.\" .fi enable filling
+.\" .br insert line break
+.\" .sp <n> insert n+1 empty lines
+.\" for manpage-specific macros, see man(7)
+.SH NAME
+dunecontrol \- Control program for the Dune build system
+.SH SYNOPSIS
+.B dunecontrol
+[\fIOPTIONS\fP] \fICOMMANDS\fP [\fICOMMAND-OPTIONS\fP]
+.SH DESCRIPTION
+.B dunecontrol
+is the control program for the build system of the Dune libraries.
+
+The Dune libraries form a set of modules. Each can be built independently using CMake.
+Additionally, though, there are dependencies between modules,
+which are expected to form a directed acyclic graph. These dependencies are set in a
+file called
+.B dune.module
+contained in the main directory of each Dune module.
+
+The
+.B dunecontrol
+program helps to build sets of inter-dependent modules. It can construct
+the entire dependency graph and provide information about it, and it can run various build-related
+commands for all modules. These are executed in the order mandated by the dependency graph.
+
+.SH COMMANDS
+Colon-separated list of commands. Available commands are:
+.HP
+.B help
+.IP
+Show a help message and exit
+.HP
+.B print
+.IP
+Print the list of modules sorted according to their dependency relations
+.HP
+.B info
+.IP
+Same as `print', but also showing whether each module is a dependency or a suggestion
+.HP
+.B printdeps
+.IP
+Print recursive dependencies of a module
+.HP
+.B vcsetup
+.IP
+Set up the version control repository (Git etc.) or working copy (SVN)
+.HP
+.B update
+.IP
+Update all modules from their respective version control systems
+.HP
+.B configure
+.IP
+Run cmake for each module
+.HP
+.B make
+.IP
+Run make for each module
+.HP
+.B all
+.IP
+Run the 'configure' and 'make' commands for each module
+.HP
+.B exec
+.IP
+Execute an arbitrary command in each module source directory
+.HP
+.B bexec
+.IP
+Execute an arbitrary command in each module build directory
+.HP
+.B status
+.IP
+Show version control status for each module
+.HP
+.B svn
+.IP
+Run svn command for each svn-managed module
+.HP
+.B git
+.IP
+Run git command for each git-managed module
+.HP
+.B export
+.IP
+Run eval `dunecontrol export` to save the list of dune.module files to the DUNE_CONTROL_PATH variable
+.SH OPTIONS
+.HP
+\fB\-h\fP, \fB\-\-help\fP
+.IP
+Show this help
+.HP
+\fB--debug\fP
+.IP
+Run with debugging output enabled
+.HP
+\fB--module=\fP\fImod\fP
+.IP
+Apply the actions on module
+.I mod
+and all modules it depends on
+.HP
+\fB--only=\fP\fImod\fP
+.IP
+Only apply the actions on module
+.I mod
+, but not on the modules it depends on
+.HP
+\fB--current\fP
+.IP
+Only apply the actions on the current module, the one whose source tree we are in
+.HP
+\fB--current-dep\fP
+.IP
+Apply the actions on the current module, and all modules it depends on
+.HP
+\fB--resume\fP
+.IP
+Resume a previous run (only consider the modules not built successfully on the previous run)
+.HP
+\fB--skipfirst\fP
+.IP
+Skip the first module (use with --resume)
+.HP
+\fB--skipversioncheck\fP
+.IP
+When looking for Dune modules, do not check whether they have the required versions
+.HP
+\fB--opts=\fP\fIfile\fP
+.IP
+Load default options from \fIfile\fP
+.HP
+\fB--builddir=\fP\fIname\fP
+.IP
+Make out-of-source builds in a subdir \fIname\fP. This directory is created inside each module.
+.HP
+\fB--[COMMAND]-opts=\fP\fIopts\fP
+.IP
+Set options for COMMAND (this is mainly useful for the 'all' COMMAND)
+
+
+.SH ENVIRONMENT VARIABLES
+.B dunecontrol
+looks for Dune modules in all directories given in the
+.B DUNE_CONTROL_PATH
+variable, and additionally recursively in all subdirectories of those directories.
+If DUNE_CONTROL_PATH is empty, the default is the current directory
+plus a system-wide installation in /usr.
+
+.SH AUTHOR
+Dune was written by the Dune team (https://www.dune-project.org/community/people).
+.PP
+This manual page was written by Oliver Sander.
+
+.SH COPYRIGHT
+Copying and distribution of this file, with or without modification,
+are permitted in any medium without royalty provided the copyright
+notice and this notice are preserved. This file is offered as-is,
+without any warranty.
--- /dev/null
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+CXX=@CXX@
+CC=@CC@
+DEPENDENCIES=@REQUIRES@
+
+Name: @PACKAGE_NAME@
+Version: @VERSION@
+Description: @DESCRIPTION@
+URL: @URL@
+Requires: ${DEPENDENCIES}
+Libs: -L${libdir} -ldunecommon
+Cflags: -I${includedir}
--- /dev/null
+Module: dune-common
+Version: 2.8.0
+Author: The Dune Core developers
+Maintainer: dune-devel@lists.dune-project.org
+Description: Basis infrastructure for all Dune modules
+URL: https://gitlab.dune-project.org/core/dune-common
+Python-Requires: portalocker numpy wheel mpi4py
+Whitespace-Hook: Yes
--- /dev/null
+add_subdirectory(common)
+
+# if Python bindings are enabled, include the necessary subdirectories.
+if(DUNE_ENABLE_PYTHONBINDINGS)
+ add_subdirectory(python)
+else()
+ exclude_subdir_from_headercheck(python)
+endif()
--- /dev/null
+add_subdirectory("parallel")
+add_subdirectory("simd")
+add_subdirectory("std")
+add_subdirectory("test")
+
+# build the library dunecommon
+
+dune_add_library("dunecommon"
+ debugalign.cc
+ debugallocator.cc
+ exceptions.cc
+ fmatrixev.cc
+ ios_state.cc
+ parametertree.cc
+ parametertreeparser.cc
+ path.cc
+ simd/test.cc
+ stdstreams.cc
+ stdthread.cc)
+
+add_dune_blas_lapack_flags(dunecommon)
+add_dune_tbb_flags(dunecommon)
+
+#install headers
+install(FILES
+ alignedallocator.hh
+ arraylist.hh
+ assertandreturn.hh
+ bartonnackmanifcheck.hh
+ bigunsignedint.hh
+ binaryfunctions.hh
+ bitsetvector.hh
+ boundschecking.hh
+ classname.hh
+ concept.hh
+ conditional.hh
+ debugalign.hh
+ debugallocator.hh
+ debugstream.hh
+ deprecated.hh
+ densematrix.hh
+ densevector.hh
+ diagonalmatrix.hh
+ documentation.hh
+ dotproduct.hh
+ dynmatrix.hh
+ dynmatrixev.hh
+ dynvector.hh
+ enumset.hh
+ exceptions.hh
+ filledarray.hh
+ float_cmp.cc
+ float_cmp.hh
+ fmatrix.hh
+ fmatrixev.hh
+ ftraits.hh
+ function.hh
+ fvector.hh
+ gcd.hh
+ genericiterator.hh
+ gmpfield.hh
+ hash.hh
+ hybridutilities.hh
+ indent.hh
+ indices.hh
+ interfaces.hh
+ ios_state.hh
+ iteratorfacades.hh
+ iteratorrange.hh
+ keywords.hh
+ lcm.hh
+ lru.hh
+ mallocallocator.hh
+ math.hh
+ matvectraits.hh
+ overloadset.hh
+ parameterizedobject.hh
+ parametertree.hh
+ parametertreeparser.hh
+ path.hh
+ poolallocator.hh
+ power.hh
+ precision.hh
+ propertymap.hh
+ promotiontraits.hh
+ proxymemberaccess.hh
+ quadmath.hh
+ rangeutilities.hh
+ reservedvector.hh
+ scalarvectorview.hh
+ scalarmatrixview.hh
+ shared_ptr.hh
+ simd.hh
+ singleton.hh
+ sllist.hh
+ stdstreams.hh
+ stdthread.hh
+ streamoperators.hh
+ stringutility.hh
+ to_unique_ptr.hh
+ timer.hh
+ transpose.hh
+ tupleutility.hh
+ tuplevector.hh
+ typelist.hh
+ typetraits.hh
+ typeutilities.hh
+ unused.hh
+ vc.hh
+ version.hh
+ visibility.hh
+DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dune/common)
+
+# Install some test headers, because they get used by tests in other modules.
+# We do this here because the test directory is not considered during make install.
+install(FILES test/iteratortest.hh
+ test/checkmatrixinterface.hh
+ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dune/common/test)
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_ALIGNED_ALLOCATOR_HH
+#define DUNE_ALIGNED_ALLOCATOR_HH
+
+#include "mallocallocator.hh"
+#include <cstdlib>
+
+
+namespace Dune
+{
+
+ /**
+ @ingroup Allocators
+ @brief Allocators which guarantee alignment of the memory
+
+ @tparam T type of the object one wants to allocate
+ @tparam Alignment explicitly specify the alignment, by default it is std::alignment_of<T>::value
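+
+ A short usage sketch (illustrative only); the allocator can be plugged into
+ standard containers, e.g.
+ @code
+ std::vector<double, Dune::AlignedAllocator<double, 64> > v(1024);
+ @endcode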
+ */
+ template<class T, int Alignment = -1>
+ class AlignedAllocator : public MallocAllocator<T> {
+
+ /*
+ * Check whether an explicit alignment was
+ * requested, otherwise fall back to the default alignment of T.
+ */
+ static constexpr int fixAlignment(int align)
+ {
+ return (Alignment==-1) ? std::alignment_of<T>::value : Alignment;
+ }
+
+ public:
+ using pointer = typename MallocAllocator<T>::pointer;
+ using size_type = typename MallocAllocator<T>::size_type;
+ template <class U> struct rebind {
+ typedef AlignedAllocator<U,Alignment> other;
+ };
+
+ static constexpr int alignment = fixAlignment(sizeof(void*));
+
+ //! allocate n objects of type T
+ pointer allocate(size_type n, [[maybe_unused]] const void* hint = 0)
+ {
+ if (n > this->max_size())
+ throw std::bad_alloc();
+
+ /*
+ * Allocate memory with the requested alignment using std::aligned_alloc.
+ */
+ pointer ret = static_cast<pointer>(std::aligned_alloc(alignment, n * sizeof(T)));
+ if (!ret)
+ throw std::bad_alloc();
+
+ return ret;
+ }
+ };
+
+}
+
+#endif // DUNE_ALIGNED_ALLOCATOR_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_COMMON_ARRAYLIST_HH
+#define DUNE_COMMON_ARRAYLIST_HH
+
+#include <array>
+#include <cassert>
+#include <memory>
+#include <vector>
+#include "iteratorfacades.hh"
+
+namespace Dune
+{
+ // forward declaration
+ template<class T, int N, class A>
+ class ArrayListIterator;
+
+ template<class T, int N, class A>
+ class ConstArrayListIterator;
+
+ /**
+ * @file
+ * \brief Implements a random-access container that can efficiently change size (similar to std::deque)
+ *
+ * This file implements the class ArrayList, which behaves like a
+ * dynamically growing array, together with
+ * the class ArrayListIterator, which is a random access iterator as needed
+ * by the STL for sorting and other algorithms.
+ * @author Markus Blatt
+ */
+ /**
+ * @addtogroup Common
+ *
+ * @{
+ */
+
+ /**
+ * @brief A dynamically growing random access list.
+ *
+ * Internally the data is organised in a list of arrays of fixed size.
+ * Whenever the capacity of the array list is not sufficient a new
+ * std::array is allocated. In contrast to
+ * std::vector this approach prevents data copying. On the outside
+ * we provide the same interface as the stl random access containers.
+ *
+ * While the concept sounds quite similar to std::deque there are slight
+ * but crucial differences:
+ * - In contrast to std::deque the actual implementation (a list of arrays)
+ *   is known, while for std::deque there are at least two possible
+ *   implementations (a dynamic array or a doubly linked list).
+ * - In contrast to std::deque there is no insert which invalidates iterators;
+ *   our push_back method leaves all iterators valid.
+ * - Additional functionality lets one delete entries before and at an
+ *   iterator while moving the iterator to the next valid position.
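+ *
+ * A short usage sketch (for illustration only):
+ * @code
+ * Dune::ArrayList<int> list;
+ * for(int i=0; i<10; ++i)
+ *   list.push_back(i);   // push_back never invalidates existing iterators
+ * @endcode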
+ */
+ template<class T, int N=100, class A=std::allocator<T> >
+ class ArrayList
+ {
+ public:
+ /**
+ * @brief The member type that is stored.
+ *
+ * Has to be assignable and has to have an empty constructor.
+ */
+ typedef T MemberType;
+
+ /**
+ * @brief Value type for stl compliance.
+ */
+ typedef T value_type;
+
+ /**
+ * @brief The type of a reference to the type we store.
+ */
+ typedef T& reference;
+
+ /**
+ * @brief The type of a const reference to the type we store.
+ */
+ typedef const T& const_reference;
+
+ /**
+ * @brief The type of a pointer to the type we store.
+ */
+ typedef T* pointer;
+
+ /**
+ * @brief The type of a const pointer to the type we store.
+ */
+ typedef const T* const_pointer;
+
+ enum
+ {
+ /**
+ * @brief The number of elements in one chunk of the list.
+ * This has to be at least one. The default is 100.
+ */
+ chunkSize_ = (N > 0) ? N : 1
+ };
+
+ /**
+ * @brief A random access iterator.
+ */
+ typedef ArrayListIterator<MemberType,N,A> iterator;
+
+ /**
+ * @brief A constant random access iterator.
+ */
+ typedef ConstArrayListIterator<MemberType,N,A> const_iterator;
+
+ /**
+ * @brief The size type.
+ */
+ typedef std::size_t size_type;
+
+ /**
+ * @brief The difference type.
+ */
+ typedef std::ptrdiff_t difference_type;
+
+ /**
+ * @brief Get an iterator that is positioned at the first element.
+ * @return The iterator.
+ */
+ iterator begin();
+
+ /**
+ * @brief Get a random access iterator that is positioned at the
+ * first element.
+ * @return The iterator.
+ */
+ const_iterator begin() const;
+
+ /**
+ * @brief Get a random access iterator positioned after the last
+ * element
+ */
+ iterator end();
+
+ /**
+ * @brief Get a random access iterator positioned after the last
+ * element
+ */
+ const_iterator end() const;
+
+ /**
+ * @brief Append an entry to the list.
+ * @param entry The new entry.
+ */
+ inline void push_back(const_reference entry);
+
+ /**
+ * @brief Get the element at specific position.
+ * @param i The index of the position.
+ * @return The element at that position.
+ */
+ inline reference operator[](size_type i);
+
+ /**
+ * @brief Get the element at specific position.
+ * @param i The index of the position.
+ * @return The element at that position.
+ */
+ inline const_reference operator[](size_type i) const;
+
+ /**
+ * @brief Get the number of elements in the list.
+ * @return The number of elements.
+ */
+ inline size_type size() const;
+
+ /**
+ * @brief Purge the list.
+ *
+ * If there are empty chunks at the front all nonempty
+ * chunks will be moved towards the front and the capacity
+ * increases.
+ */
+ inline void purge();
+
+ /**
+ * @brief Delete all entries from the list.
+ */
+ inline void clear();
+ /**
+ * @brief Constructs an Array list with one chunk.
+ */
+ ArrayList();
+
+ private:
+
+ /**
+ * @brief The allocators for the smart pointer.
+ */
+ using SmartPointerAllocator = typename std::allocator_traits<A>::template rebind_alloc< std::shared_ptr< std::array<MemberType,chunkSize_> > >;
+
+ /**
+ * @brief The allocator for the fixed array.
+ */
+ using ArrayAllocator = typename std::allocator_traits<A>::template rebind_alloc< std::array<MemberType,chunkSize_> >;
+
+ /**
+ * @brief The iterator needs access to the private variables.
+ */
+ friend class ArrayListIterator<T,N,A>;
+ friend class ConstArrayListIterator<T,N,A>;
+
+ /** @brief the data chunks of our list. */
+ std::vector<std::shared_ptr<std::array<MemberType,chunkSize_> >,
+ SmartPointerAllocator> chunks_;
+ /** @brief The current data capacity.
+ * This is the capacity that the list could have theoretically
+ * with this number of chunks. That is chunks * chunkSize.
+ * In practice some of the chunks at the beginning might be empty
+ * (i.e. null pointers in the first start_/chunkSize chunks)
+ * because of previous calls to eraseToHere.
+ * start_+size_<=capacity_ holds.
+ */
+ size_type capacity_;
+ /** @brief The current number of elements in our data structure. */
+ size_type size_;
+ /** @brief The index of the first entry. */
+ size_type start_;
+ /**
+ * @brief Get the element at specific position.
+ *
+ * Index 0 always refers to the first entry in the list
+ * whether it is erased or not!
+ * @param i The index of the position.
+ * @return The element at that position.
+ */
+ inline reference elementAt(size_type i);
+
+ /**
+ * @brief Get the element at specific position.
+ *
+ * Index 0 always refers to the first entry in the list
+ * whether it is erased or not!
+ * @param i The index of the position.
+ * @return The element at that position.
+ */
+ inline const_reference elementAt(size_type i) const;
+ };
+
+
+ /**
+ * @brief A random access iterator for the Dune::ArrayList class.
+ */
+ template<class T, int N, class A>
+ class ArrayListIterator : public RandomAccessIteratorFacade<ArrayListIterator<T,N,A>,
+ typename A::value_type,
+ typename A::value_type &,
+ typename A::difference_type>
+ {
+
+ friend class ArrayList<T,N,A>;
+ friend class ConstArrayListIterator<T,N,A>;
+ public:
+ /**
+ * @brief The member type.
+ */
+ typedef typename A::value_type MemberType;
+
+ typedef typename A::difference_type difference_type;
+
+ typedef typename A::size_type size_type;
+
+ using reference = typename A::value_type &;
+
+ using const_reference = typename A::value_type const&;
+
+ enum
+ {
+ /**
+ * @brief The number of elements in one chunk of the list.
+ *
+ * This has to be at least one. The default is 100.
+ */
+ chunkSize_ = (N > 0) ? N : 1
+ };
+
+
+ /**
+ * @brief Compares two iterators.
+ * @return True if the iterators are for the same list and
+ *         at the same position.
+ */
+ inline bool equals(const ArrayListIterator<MemberType,N,A>& other) const;
+
+ /**
+ * @brief Compares two iterators.
+ * @return True if the iterators are for the same list and
+ *         at the same position.
+ */
+ inline bool equals(const ConstArrayListIterator<MemberType,N,A>& other) const;
+
+ /**
+ * @brief Increment the iterator.
+ */
+ inline void increment();
+
+ /**
+ * @brief decrement the iterator.
+ */
+ inline void decrement();
+
+ /**
+ * @brief Get the value of the list at an arbitrary position.
+ * @return The value at that position.
+ */
+ inline reference elementAt(size_type i) const;
+
+ /**
+ * @brief Access the element at the current position.
+ * @return The element at the current position.
+ */
+ inline reference dereference() const;
+
+ /**
+ * @brief Erase all entries before the current position
+ * and the one at the current position.
+ *
+ * Afterwards the iterator will be positioned at the next
+ * unerased entry or the end if the list is empty.
+ * This does not invalidate any iterators positioned after
+ * the current position but those positioned at previous ones.
+ * @return An iterator to the first position after the deleted
+ * ones or to the end if the list is empty.
+ */
+ inline void eraseToHere();
+
+ /** \todo Please doc me! */
+ inline size_type position(){return position_;}
+
+ /** \todo Please doc me! */
+ inline void advance(difference_type n);
+
+ /** \todo Please doc me! */
+ inline difference_type distanceTo(const ArrayListIterator<T,N,A>& other) const;
+
+ /** \todo Please doc me! */
+ inline ArrayListIterator<T,N,A>& operator=(const ArrayListIterator<T,N,A>& other);
+
+ //! Standard constructor
+ inline ArrayListIterator() : position_(0), list_(nullptr)
+ {}
+
+ private:
+ /**
+ * @brief Constructor.
+ * @param list The list we are an iterator for.
+ * @param position The initial position of the iterator.
+ */
+ inline ArrayListIterator(ArrayList<T,N,A>& arrayList, size_type position);
+
+ /**
+ * @brief The current position.
+ */
+ size_type position_;
+ /**
+ * @brief The list we are an iterator for.
+ */
+ ArrayList<T,N,A>* list_;
+ };
+
+ /**
+ * @brief A constant random access iterator for the Dune::ArrayList class.
+ */
+ template<class T, int N, class A>
+ class ConstArrayListIterator
+ : public RandomAccessIteratorFacade<ConstArrayListIterator<T,N,A>,
+ const typename A::value_type,
+ typename A::value_type const&,
+ typename A::difference_type>
+ {
+
+ friend class ArrayList<T,N,A>;
+ friend class ArrayListIterator<T,N,A>;
+
+ public:
+ /**
+ * @brief The member type.
+ */
+ typedef typename A::value_type MemberType;
+
+ typedef typename A::difference_type difference_type;
+
+ typedef typename A::size_type size_type;
+
+ using reference = typename A::value_type &;
+
+ using const_reference = typename A::value_type const&;
+
+ enum
+ {
+ /**
+ * @brief The number of elements in one chunk of the list.
+ *
+ * This has to be at least one. The default is 100.
+ */
+ chunkSize_ = (N > 0) ? N : 1
+ };
+
+ /**
+ * @brief Compares two iterators.
+ * @return True if the iterators are for the same list and
+ *         at the same position.
+ */
+ inline bool equals(const ConstArrayListIterator<MemberType,N,A>& other) const;
+
+ /**
+ * @brief Increment the iterator.
+ */
+ inline void increment();
+
+ /**
+ * @brief decrement the iterator.
+ */
+ inline void decrement();
+
+ /** \todo Please doc me! */
+ inline void advance(difference_type n);
+
+ /** \todo Please doc me! */
+ inline difference_type distanceTo(const ConstArrayListIterator<T,N,A>& other) const;
+
+ /**
+ * @brief Get the value of the list at an arbitrary position.
+ * @return The value at that position.
+ */
+ inline const_reference elementAt(size_type i) const;
+
+ /**
+ * @brief Access the element at the current position.
+ * @return The element at the current position.
+ */
+ inline const_reference dereference() const;
+
+ inline const ConstArrayListIterator<T,N,A>& operator=(const ConstArrayListIterator<T,N,A>& other);
+
+ inline ConstArrayListIterator() : position_(0), list_(nullptr)
+ {}
+
+ inline ConstArrayListIterator(const ArrayListIterator<T,N,A>& other);
+
+ private:
+
+ /**
+ * @brief Constructor.
+ * @param list The list we are an iterator for.
+ * @param position The initial position of the iterator.
+ */
+ inline ConstArrayListIterator(const ArrayList<T,N,A>& arrayList, size_type position);
+
+ /**
+ * @brief The current position.
+ */
+ size_type position_;
+ /**
+ * @brief The list we are an iterator for.
+ */
+ const ArrayList<T,N,A>* list_;
+ };
+
+
+ template<class T, int N, class A>
+ ArrayList<T,N,A>::ArrayList()
+ : capacity_(0), size_(0), start_(0)
+ {
+ chunks_.reserve(100);
+ }
+
+ template<class T, int N, class A>
+ void ArrayList<T,N,A>::clear(){
+ capacity_=0;
+ size_=0;
+ start_=0;
+ chunks_.clear();
+ }
+
+ template<class T, int N, class A>
+ size_t ArrayList<T,N,A>::size() const
+ {
+ return size_;
+ }
+
+ template<class T, int N, class A>
+ void ArrayList<T,N,A>::push_back(const_reference entry)
+ {
+ size_t index=start_+size_;
+ if(index==capacity_)
+ {
+ chunks_.push_back(std::make_shared<std::array<MemberType,chunkSize_> >());
+ capacity_ += chunkSize_;
+ }
+ elementAt(index)=entry;
+ ++size_;
+ }
+
+ template<class T, int N, class A>
+ typename ArrayList<T,N,A>::reference ArrayList<T,N,A>::operator[](size_type i)
+ {
+ return elementAt(start_+i);
+ }
+
+
+ template<class T, int N, class A>
+ typename ArrayList<T,N,A>::const_reference ArrayList<T,N,A>::operator[](size_type i) const
+ {
+ return elementAt(start_+i);
+ }
+
+ template<class T, int N, class A>
+ typename ArrayList<T,N,A>::reference ArrayList<T,N,A>::elementAt(size_type i)
+ {
+ return chunks_[i/chunkSize_]->operator[](i%chunkSize_);
+ }
+
+
+ template<class T, int N, class A>
+ typename ArrayList<T,N,A>::const_reference ArrayList<T,N,A>::elementAt(size_type i) const
+ {
+ return chunks_[i/chunkSize_]->operator[](i%chunkSize_);
+ }
+
+ template<class T, int N, class A>
+ ArrayListIterator<T,N,A> ArrayList<T,N,A>::begin()
+ {
+ return ArrayListIterator<T,N,A>(*this, start_);
+ }
+
+ template<class T, int N, class A>
+ ConstArrayListIterator<T,N,A> ArrayList<T,N,A>::begin() const
+ {
+ return ConstArrayListIterator<T,N,A>(*this, start_);
+ }
+
+ template<class T, int N, class A>
+ ArrayListIterator<T,N,A> ArrayList<T,N,A>::end()
+ {
+ return ArrayListIterator<T,N,A>(*this, start_+size_);
+ }
+
+ template<class T, int N, class A>
+ ConstArrayListIterator<T,N,A> ArrayList<T,N,A>::end() const
+ {
+ return ConstArrayListIterator<T,N,A>(*this, start_+size_);
+ }
+
+ template<class T, int N, class A>
+ void ArrayList<T,N,A>::purge()
+ {
+ // Distance to copy to the left.
+ size_t distance = start_/chunkSize_;
+ if(distance>0) {
+ // Number of chunks with entries in it;
+ size_t chunks = ((start_%chunkSize_ + size_)/chunkSize_ );
+
+ // Copy chunks to the left.
+ std::copy(chunks_.begin()+distance,
+ chunks_.begin()+(distance+chunks), chunks_.begin());
+
+ // Calculate new parameters
+ start_ = start_ % chunkSize_;
+ //capacity += distance * chunkSize_;
+ }
+ }
+
+ template<class T, int N, class A>
+ void ArrayListIterator<T,N,A>::advance(difference_type i)
+ {
+ position_+=i;
+ }
+
+ template<class T, int N, class A>
+ void ConstArrayListIterator<T,N,A>::advance(difference_type i)
+ {
+ position_+=i;
+ }
+
+
+ template<class T, int N, class A>
+ bool ArrayListIterator<T,N,A>::equals(const ArrayListIterator<MemberType,N,A>& other) const
+ {
+ // Makes only sense if we reference a common list
+ assert(list_==(other.list_));
+ return position_==other.position_ ;
+ }
+
+
+ template<class T, int N, class A>
+ bool ArrayListIterator<T,N,A>::equals(const ConstArrayListIterator<MemberType,N,A>& other) const
+ {
+ // Makes only sense if we reference a common list
+ assert(list_==(other.list_));
+ return position_==other.position_ ;
+ }
+
+
+ template<class T, int N, class A>
+ bool ConstArrayListIterator<T,N,A>::equals(const ConstArrayListIterator<MemberType,N,A>& other) const
+ {
+ // Makes only sense if we reference a common list
+ assert(list_==(other.list_));
+ return position_==other.position_ ;
+ }
+
+ template<class T, int N, class A>
+ void ArrayListIterator<T,N,A>::increment()
+ {
+ ++position_;
+ }
+
+ template<class T, int N, class A>
+ void ConstArrayListIterator<T,N,A>::increment()
+ {
+ ++position_;
+ }
+
+ template<class T, int N, class A>
+ void ArrayListIterator<T,N,A>::decrement()
+ {
+ --position_;
+ }
+
+ template<class T, int N, class A>
+ void ConstArrayListIterator<T,N,A>::decrement()
+ {
+ --position_;
+ }
+
+ template<class T, int N, class A>
+ typename ArrayListIterator<T,N,A>::reference ArrayListIterator<T,N,A>::elementAt(size_type i) const
+ {
+ return list_->elementAt(i+position_);
+ }
+
+ template<class T, int N, class A>
+ typename ConstArrayListIterator<T,N,A>::const_reference ConstArrayListIterator<T,N,A>::elementAt(size_type i) const
+ {
+ return list_->elementAt(i+position_);
+ }
+
+ template<class T, int N, class A>
+ typename ArrayListIterator<T,N,A>::reference ArrayListIterator<T,N,A>::dereference() const
+ {
+ return list_->elementAt(position_);
+ }
+
+ template<class T, int N, class A>
+ typename ConstArrayListIterator<T,N,A>::const_reference ConstArrayListIterator<T,N,A>::dereference() const
+ {
+ return list_->elementAt(position_);
+ }
+
+ template<class T, int N, class A>
+ typename ArrayListIterator<T,N,A>::difference_type ArrayListIterator<T,N,A>::distanceTo(const ArrayListIterator<T,N,A>& other) const
+ {
+ // Makes only sense if we reference a common list
+ assert(list_==(other.list_));
+ return other.position_ - position_;
+ }
+
+ template<class T, int N, class A>
+ typename ConstArrayListIterator<T,N,A>::difference_type ConstArrayListIterator<T,N,A>::distanceTo(const ConstArrayListIterator<T,N,A>& other) const
+ {
+ // Makes only sense if we reference a common list
+ assert(list_==(other.list_));
+ return other.position_ - position_;
+ }
+
+ template<class T, int N, class A>
+ ArrayListIterator<T,N,A>& ArrayListIterator<T,N,A>::operator=(const ArrayListIterator<T,N,A>& other)
+ {
+ position_=other.position_;
+ list_=other.list_;
+ return *this;
+ }
+
+ template<class T, int N, class A>
+ const ConstArrayListIterator<T,N,A>& ConstArrayListIterator<T,N,A>::operator=(const ConstArrayListIterator<T,N,A>& other)
+ {
+ position_=other.position_;
+ list_=other.list_;
+ return *this;
+ }
+
+ template<class T, int N, class A>
+ void ArrayListIterator<T,N,A>::eraseToHere()
+ {
+ list_->size_ -= ++position_ - list_->start_;
+ // chunk number of the new position.
+ size_t posChunkStart = position_ / chunkSize_;
+ // number of chunks to deallocate
+ size_t chunks = (position_ - list_->start_ + list_->start_ % chunkSize_)
+ / chunkSize_;
+ list_->start_ = position_;
+
+ // Deallocate memory not needed any more.
+ for(size_t chunk=0; chunk<chunks; chunk++) {
+ --posChunkStart;
+ list_->chunks_[posChunkStart].reset();
+ }
+
+ // Capacity stays the same as the chunks before us
+ // are still there. They are null pointers now.
+ assert(list_->start_+list_->size_<=list_->capacity_);
+ }
+
+ template<class T, int N, class A>
+ ArrayListIterator<T,N,A>::ArrayListIterator(ArrayList<T,N,A>& arrayList, size_type position)
+ : position_(position), list_(&arrayList)
+ {}
+
+
+ template<class T, int N, class A>
+ ConstArrayListIterator<T,N,A>::ConstArrayListIterator(const ArrayList<T,N,A>& arrayList,
+ size_type position)
+ : position_(position), list_(&arrayList)
+ {}
+
+ template<class T, int N, class A>
+ ConstArrayListIterator<T,N,A>::ConstArrayListIterator(const ArrayListIterator<T,N,A>& other)
+ : position_(other.position_), list_(other.list_)
+ {}
+
+
+ /** @} */
+}
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_ASSERTANDRETURN_HH
+#define DUNE_COMMON_ASSERTANDRETURN_HH
+
+#include <cassert>
+
+//! Asserts a condition and return on success in constexpr context.
+/**
+ * The macro DUNE_ASSERT_AND_RETURN can be used as expression in the return
+ * statement of a constexpr function to have assert() and constexpr at the
+ * same time. It first uses assert for the condition given by the first argument
+ * and then returns the value of the second argument.
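+ *
+ * A minimal usage sketch (the function below is hypothetical and only
+ * illustrates the intended pattern):
+ * \code
+ * constexpr int checkedIndex(int i, int size)
+ * {
+ *   return DUNE_ASSERT_AND_RETURN(0 <= i && i < size, i);
+ * }
+ * \endcode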
+ *
+ * \ingroup CxxUtilities
+ */
+#ifdef NDEBUG
+ #define DUNE_ASSERT_AND_RETURN(C,X) X
+#else
+ #define DUNE_ASSERT_AND_RETURN(C,X) (!(C) ? throw [&](){assert(!#C);return 0;}() : 0), X
+#endif
+
+
+
+#endif // DUNE_COMMON_ASSERTANDRETURN_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+/** @file
+ @author Robert Kloefkorn
+ @brief Provides check for implementation of interface methods when using
+ static polymorphism, i.e. the Barton-Nackman trick. This is purely for
+ debugging purposes. To check the correct implementation of interface methods
+ (and pick up possible infinite loops) NDEBUG must be undefined and
+ DUNE_INTERFACECHECK has to be defined.
+
+ Use by invoking CHECK_INTERFACE_IMPLEMENTATION(asImp().methodToCheck())
+ and, for template methods, with double parentheses:
+ CHECK_INTERFACE_IMPLEMENTATION((asImp().template methodToCheck<param>())).
+ If either NDEBUG is defined or
+ DUNE_INTERFACECHECK is undefined the CHECK_INTERFACE_IMPLEMENTATION macro is empty.
+
+ Note: adding the interface check to a method will cause the implementation of the
+ method to be called twice, so before use make sure
+ that this will not cause problems e.g. if internal counters are updated.
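+
+ A minimal usage sketch (hypothetical interface and implementation classes,
+ for illustration only):
+ \code
+ template<class Imp>
+ struct Interface
+ {
+   void method() const
+   {
+     CHECK_AND_CALL_INTERFACE_IMPLEMENTATION(asImp().method());
+   }
+   const Imp& asImp() const { return static_cast<const Imp&>(*this); }
+ };
+
+ struct Implementation : Interface<Implementation>
+ {
+   void method() const { }   // the actual implementation
+ };
+ \endcode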
+ **/
+
+//- Dune includes
+#include <dune/common/exceptions.hh>
+
+#ifdef CHECK_INTERFACE_IMPLEMENTATION
+#undef CHECK_INTERFACE_IMPLEMENTATION
+#endif
+#ifdef CHECK_AND_CALL_INTERFACE_IMPLEMENTATION
+#undef CHECK_AND_CALL_INTERFACE_IMPLEMENTATION
+#endif
+
+#if defined NDEBUG || !defined DUNE_INTERFACECHECK
+#define CHECK_INTERFACE_IMPLEMENTATION(dummy)
+#else
+#define CHECK_INTERFACE_IMPLEMENTATION(__interface_method_to_call__) \
+ {\
+ static bool call = false; \
+ if( call == true ) \
+ DUNE_THROW(NotImplemented,"Interface method not implemented!");\
+ call = true; \
+ try { \
+ (__interface_method_to_call__); \
+ call = false; \
+ } \
+ catch ( ... ) \
+ { \
+ call = false; \
+ throw; \
+ } \
+ }
+#endif
+
+/** The macro CHECK_AND_CALL_INTERFACE_IMPLEMENTATION throws an exception
+   if the interface method is not implemented and just calls the method
+   otherwise. If either NDEBUG is defined or DUNE_INTERFACECHECK is
+   undefined, no checking is done and the method is just called.
+ */
+#if defined NDEBUG || !defined DUNE_INTERFACECHECK
+#define CHECK_AND_CALL_INTERFACE_IMPLEMENTATION(__interface_method_to_call__) \
+ (__interface_method_to_call__)
+#else
+#define CHECK_AND_CALL_INTERFACE_IMPLEMENTATION(__interface_method_to_call__) \
+ CHECK_INTERFACE_IMPLEMENTATION(__interface_method_to_call__)
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_BIGUNSIGNEDINT_HH
+#define DUNE_BIGUNSIGNEDINT_HH
+
+#include <algorithm>
+#include <iostream>
+#include <limits>
+#include <cstdint>
+#include <cstdlib>
+#include <type_traits>
+#include <dune/common/exceptions.hh>
+#include <dune/common/hash.hh>
+
+/**
+ * @file
+ * @brief Portable very large unsigned integers
+ * @author Peter Bastian
+ */
+
+namespace Dune
+{
+#if HAVE_MPI
+ template<class K>
+ struct MPITraits;
+#endif
+
+ /** @addtogroup Numbers
+ *
+ * @{
+ */
+
+ namespace Impl {
+
+ // numeric_limits_helper provides std::numeric_limits access to the internals
+ // of bigunsignedint. Previously, the correct specialization of std::numeric_limits
+ // was a friend of bigunsignedint, but that creates problems on recent versions
+ // of clang with the alternative libc++ library, because that library declares the
+ // base template of std::numeric_limits as a class and clang subsequently complains
+ // if the friend declaration uses 'struct'. Unfortunately, libstdc++ uses a struct,
+ // making it impossible to keep clang happy for both standard libraries.
+ // So we move the access helper functionality into a custom struct and simply let
+ // the numeric_limits specialization inherit from the helper.
+
+ template<typename T>
+ struct numeric_limits_helper
+ {
+
+ protected:
+
+ static std::uint16_t& digit(T& big_unsigned_int, std::size_t i)
+ {
+ return big_unsigned_int.digit[i];
+ }
+
+ };
+
+ }
+
+ /**
+ * @brief Portable very large unsigned integers
+ *
+ * Implements (arbitrarily) large unsigned integers to be used as global
+ * ids in some grid managers. Size is a template parameter.
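+ *
+ * A small usage sketch (the values are chosen only for illustration):
+ * \code
+ * Dune::bigunsignedint<128> a(42u), b(7u);
+ * Dune::bigunsignedint<128> c = a*b + b;
+ * std::cout << c << std::endl;   // prints the number in hex notation
+ * \endcode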
+ *
+ * \tparam k Number of bits of the integer type
+ */
+
+ template<int k>
+ class bigunsignedint {
+ public:
+
+    // std::uint16_t is 16 bits wide, n is the number of digits needed
+ enum { bits=std::numeric_limits<std::uint16_t>::digits, n=k/bits+(k%bits!=0),
+ hexdigits=4, bitmask=0xFFFF, compbitmask=0xFFFF0000,
+ overflowmask=0x1 };
+
+ //! Construct uninitialized
+ bigunsignedint ();
+
+ //! Construct from signed int
+ template<typename Signed>
+ bigunsignedint (Signed x, typename std::enable_if<std::is_integral<Signed>::value && std::is_signed<Signed>::value>::type* = 0);
+
+ //! Construct from unsigned int
+ bigunsignedint (std::uintmax_t x);
+
+ //! Print number in hex notation
+ void print (std::ostream& s) const ;
+
+ //! add
+ bigunsignedint<k> operator+ (const bigunsignedint<k>& x) const;
+ bigunsignedint<k>& operator+= (const bigunsignedint<k>& x);
+
+ //! subtract
+ bigunsignedint<k> operator- (const bigunsignedint<k>& x) const;
+ bigunsignedint<k>& operator-= (const bigunsignedint<k>& x);
+
+ //! multiply
+ bigunsignedint<k> operator* (const bigunsignedint<k>& x) const;
+ bigunsignedint<k>& operator*= (const bigunsignedint<k>& x);
+
+ //! prefix increment
+ bigunsignedint<k>& operator++ ();
+
+ //! divide
+ //! \warning This function is very slow and its usage should be
+ //! prevented if possible
+ bigunsignedint<k> operator/ (const bigunsignedint<k>& x) const;
+ bigunsignedint<k>& operator/= (const bigunsignedint<k>& x);
+
+ //! modulo
+ //! \warning This function is very slow and its usage should be
+ //! prevented if possible
+ bigunsignedint<k> operator% (const bigunsignedint<k>& x) const;
+ bigunsignedint<k>& operator%= (const bigunsignedint<k>& x);
+
+ //! bitwise and
+ bigunsignedint<k> operator& (const bigunsignedint<k>& x) const;
+ bigunsignedint<k>& operator&= (const bigunsignedint<k>& x);
+
+    //! bitwise exclusive or (xor)
+ bigunsignedint<k> operator^ (const bigunsignedint<k>& x) const;
+ bigunsignedint<k>& operator^= (const bigunsignedint<k>& x);
+
+ //! bitwise or
+ bigunsignedint<k> operator| (const bigunsignedint<k>& x) const;
+ bigunsignedint<k>& operator|= (const bigunsignedint<k>& x);
+
+ //! bitwise complement
+ bigunsignedint<k> operator~ () const;
+
+
+ //! left shift
+ bigunsignedint<k> operator<< (int i) const;
+
+ //! right shift
+ bigunsignedint<k> operator>> (int i) const;
+
+
+ //! less than
+ bool operator< (const bigunsignedint<k>& x) const;
+
+ //! less than or equal
+ bool operator<= (const bigunsignedint<k>& x) const;
+
+ //! greater than
+ bool operator> (const bigunsignedint<k>& x) const;
+
+ //! greater or equal
+ bool operator>= (const bigunsignedint<k>& x) const;
+
+ //! equal
+ bool operator== (const bigunsignedint<k>& x) const;
+
+ //! not equal
+ bool operator!= (const bigunsignedint<k>& x) const;
+
+
+ //! export to other types
+ // operator unsigned int () const;
+ std::uint_least32_t touint() const;
+ /**
+ * @brief Convert to a double.
+ *
+ * @warning Subject to rounding errors!
+ */
+ double todouble() const;
+
+ friend class bigunsignedint<k/2>;
+ friend struct Impl::numeric_limits_helper< bigunsignedint<k> >;
+
+ inline friend std::size_t hash_value(const bigunsignedint& arg)
+ {
+ return hash_range(arg.digit,arg.digit + arg.n);
+ }
+
+ private:
+ std::uint16_t digit[n];
+#if HAVE_MPI
+ friend struct MPITraits<bigunsignedint<k> >;
+#endif
+ inline void assign(std::uintmax_t x);
+
+
+ } ;
+
+ // Constructors
+ template<int k>
+ bigunsignedint<k>::bigunsignedint ()
+ {
+ assign(0u);
+ }
+
+ template<int k>
+ template<typename Signed>
+ bigunsignedint<k>::bigunsignedint (Signed y, typename std::enable_if<std::is_integral<Signed>::value && std::is_signed<Signed>::value>::type*)
+ {
+ if (y < 0)
+ DUNE_THROW(Dune::Exception, "Trying to construct a Dune::bigunsignedint from a negative integer: " << y);
+ assign(y);
+ }
+
+ template<int k>
+ bigunsignedint<k>::bigunsignedint (std::uintmax_t x)
+ {
+ assign(x);
+ }
+ template<int k>
+ void bigunsignedint<k>::assign(std::uintmax_t x)
+ {
+ static const int no=std::min(static_cast<int>(n),
+ static_cast<int>(std::numeric_limits<std::uintmax_t>::digits/bits));
+
+ for(int i=0; i<no; ++i) {
+ digit[i] = (x&bitmask);
+ x=x>>bits;
+ }
+ for (unsigned int i=no; i<n; i++) digit[i]=0;
+ }
+
+ // export
+ template<int k>
+ inline std::uint_least32_t bigunsignedint<k>::touint () const
+ {
+ return (digit[1]<<bits)+digit[0];
+ }
+
+ template<int k>
+ inline double bigunsignedint<k>::todouble() const
+ {
+ int firstInZeroRange=n;
+ for(int i=n-1; i>=0; --i)
+ if(digit[i]!=0)
+ break;
+ else
+ --firstInZeroRange;
+ int representableDigits=std::numeric_limits<double>::digits/bits;
+ int lastInRepresentableRange=0;
+ if(representableDigits<firstInZeroRange)
+ lastInRepresentableRange=firstInZeroRange-representableDigits;
+ double val=0;
+ for(int i=firstInZeroRange-1; i>=lastInRepresentableRange; --i)
+ val =val*(1<<bits)+digit[i];
+ return val*(1<<(bits*lastInRepresentableRange));
+ }
+ // print
+ template<int k>
+ inline void bigunsignedint<k>::print (std::ostream& s) const
+ {
+ bool leading=false;
+
+ // print from left to right
+ for (int i=n-1; i>=0; i--)
+ for (int d=hexdigits-1; d>=0; d--)
+ {
+ // extract one hex digit
+ int current = (digit[i]>>(d*4))&0xF;
+ if (current!=0)
+ {
+ // s.setf(std::ios::noshowbase);
+ s << std::hex << current;
+ leading = false;
+ }
+ else if (!leading) s << std::hex << current;
+ }
+ if (leading) s << "0";
+ s << std::dec;
+ }
+
+ template <int k>
+ inline std::ostream& operator<< (std::ostream& s, const bigunsignedint<k>& x)
+ {
+ x.print(s);
+ return s;
+ }
+
+ #define DUNE_BINOP(OP) \
+ template <int k> \
+ inline bigunsignedint<k> bigunsignedint<k>::operator OP (const bigunsignedint<k> &x) const \
+ { \
+ auto temp = *this; \
+ temp OP##= x; \
+ return temp; \
+ }
+
+ DUNE_BINOP(+)
+ DUNE_BINOP(-)
+ DUNE_BINOP(*)
+ DUNE_BINOP(/)
+ DUNE_BINOP(%)
+ DUNE_BINOP(&)
+ DUNE_BINOP(^)
+ DUNE_BINOP(|)
+
+ #undef DUNE_BINOP
+
+ template <int k>
+ inline bigunsignedint<k>& bigunsignedint<k>::operator+= (const bigunsignedint<k>& x)
+ {
+ std::uint_fast32_t overflow=0;
+
+ for (unsigned int i=0; i<n; i++)
+ {
+ std::uint_fast32_t sum = static_cast<std::uint_fast32_t>(digit[i]) + static_cast<std::uint_fast32_t>(x.digit[i]) + overflow;
+ digit[i] = sum&bitmask;
+ overflow = (sum>>bits)&overflowmask;
+ }
+ return *this;
+ }
+
+ template <int k>
+ inline bigunsignedint<k>& bigunsignedint<k>::operator-= (const bigunsignedint<k>& x)
+ {
+ std::int_fast32_t overflow=0;
+
+ for (unsigned int i=0; i<n; i++)
+ {
+ std::int_fast32_t diff = static_cast<std::int_fast32_t>(digit[i]) - static_cast<std::int_fast32_t>(x.digit[i]) - overflow;
+ if (diff>=0)
+ {
+ digit[i] = static_cast<std::uint16_t>(diff);
+ overflow = 0;
+ }
+ else
+ {
+ digit[i] = static_cast<std::uint16_t>(diff+bitmask+1);
+ overflow = 1;
+ }
+ }
+ return *this;
+ }
+
+ template <int k>
+ inline bigunsignedint<k>& bigunsignedint<k>::operator*= (const bigunsignedint<k>& x)
+ {
+ bigunsignedint<2*k> finalproduct(0);
+
+ for (unsigned int m=0; m<n; m++) // digit in right factor
+ {
+ bigunsignedint<2*k> singleproduct(0);
+ std::uint_fast32_t overflow(0);
+ for (unsigned int i=0; i<n; i++)
+ {
+ std::uint_fast32_t digitproduct = static_cast<std::uint_fast32_t>(digit[i])*static_cast<std::uint_fast32_t>(x.digit[m])+overflow;
+ singleproduct.digit[i+m] = static_cast<std::uint16_t>(digitproduct&bitmask);
+ overflow = (digitproduct>>bits)&bitmask;
+ }
+ finalproduct = finalproduct+singleproduct;
+ }
+
+ for (unsigned int i=0; i<n; i++) digit[i] = finalproduct.digit[i];
+ return *this;
+ }
+
+ template <int k>
+ inline bigunsignedint<k>& bigunsignedint<k>::operator++ ()
+ {
+ std::uint_fast32_t overflow=1;
+
+ for (unsigned int i=0; i<n; i++)
+ {
+ std::uint_fast32_t sum = static_cast<std::uint_fast32_t>(digit[i]) + overflow;
+ digit[i] = sum&bitmask;
+ overflow = (sum>>bits)&overflowmask;
+ }
+ return *this;
+ }
+
+ template <int k>
+ inline bigunsignedint<k>& bigunsignedint<k>::operator/= (const bigunsignedint<k>& x)
+ {
+ if(x==0)
+ DUNE_THROW(Dune::MathError, "division by zero!");
+
+ // naive repeated subtraction; runtime grows with the quotient ("better slow than nothing")
+ bigunsignedint<k> result(0);
+
+ while (*this>=x)
+ {
+ ++result;
+ *this -= x;
+ }
+
+ *this = result;
+ return *this;
+ }
+
+ template <int k>
+ inline bigunsignedint<k>& bigunsignedint<k>::operator%= (const bigunsignedint<k>& x)
+ {
+ // naive repeated subtraction; runtime grows with the quotient ("better slow than nothing")
+ while (*this>=x)
+ {
+ *this -= x;
+ }
+
+ return *this;
+ }
+
+ template <int k>
+ inline bigunsignedint<k>& bigunsignedint<k>::operator&= (const bigunsignedint<k>& x)
+ {
+ for (unsigned int i=0; i<n; i++)
+ digit[i] = digit[i]&x.digit[i];
+ return *this;
+ }
+
+ template <int k>
+ inline bigunsignedint<k>& bigunsignedint<k>::operator^= (const bigunsignedint<k>& x)
+ {
+ for (unsigned int i=0; i<n; i++)
+ digit[i] = digit[i]^x.digit[i];
+ return *this;
+ }
+
+ template <int k>
+ inline bigunsignedint<k>& bigunsignedint<k>::operator|= (const bigunsignedint<k>& x)
+ {
+ for (unsigned int i=0; i<n; i++)
+ digit[i] = digit[i]|x.digit[i];
+ return *this;
+ }
+
+ template <int k>
+ inline bigunsignedint<k> bigunsignedint<k>::operator~ () const
+ {
+ bigunsignedint<k> result;
+ for (unsigned int i=0; i<n; i++)
+ result.digit[i] = ~digit[i];
+ return result;
+ }
+
+ template <int k>
+ inline bigunsignedint<k> bigunsignedint<k>::operator<< (int shift) const
+ {
+ bigunsignedint<k> result(0);
+
+ // multiples of bits
+ int j=shift/bits;
+ for (int i=n-1-j; i>=0; i--)
+ result.digit[i+j] = digit[i];
+
+ // remainder
+ j=shift%bits;
+ for (int i=n-1; i>=0; i--)
+ {
+ unsigned int temp = result.digit[i];
+ temp = temp<<j;
+ result.digit[i] = static_cast<std::uint16_t>(temp&bitmask);
+ temp = temp>>bits;
+ if (i+1<(int)n)
+ result.digit[i+1] = result.digit[i+1]|temp;
+ }
+
+ return result;
+ }
+
+ template <int k>
+ inline bigunsignedint<k> bigunsignedint<k>::operator>> (int shift) const
+ {
+ bigunsignedint<k> result(0);
+
+ // multiples of bits
+ int j=shift/bits;
+ for (unsigned int i=0; i<n-j; i++)
+ result.digit[i] = digit[i+j];
+
+ // remainder
+ j=shift%bits;
+ for (unsigned int i=0; i<n; i++)
+ {
+ std::uint_fast32_t temp = result.digit[i];
+ temp = temp<<(bits-j);
+ result.digit[i] = static_cast<std::uint16_t>((temp&compbitmask)>>bits);
+ if (i>0)
+ result.digit[i-1] = result.digit[i-1] | (temp&bitmask);
+ }
+
+ return result;
+ }
+
+ template <int k>
+ inline bool bigunsignedint<k>::operator!= (const bigunsignedint<k>& x) const
+ {
+ for (unsigned int i=0; i<n; i++)
+ if (digit[i]!=x.digit[i]) return true;
+ return false;
+ }
+
+ template <int k>
+ inline bool bigunsignedint<k>::operator== (const bigunsignedint<k>& x) const
+ {
+ return !((*this)!=x);
+ }
+
+ template <int k>
+ inline bool bigunsignedint<k>::operator< (const bigunsignedint<k>& x) const
+ {
+ for (int i=n-1; i>=0; i--)
+ if (digit[i]<x.digit[i]) return true;
+ else if (digit[i]>x.digit[i]) return false;
+ return false;
+ }
+
+ template <int k>
+ inline bool bigunsignedint<k>::operator<= (const bigunsignedint<k>& x) const
+ {
+ for (int i=n-1; i>=0; i--)
+ if (digit[i]<x.digit[i]) return true;
+ else if (digit[i]>x.digit[i]) return false;
+ return true;
+ }
+
+ template <int k>
+ inline bool bigunsignedint<k>::operator> (const bigunsignedint<k>& x) const
+ {
+ return !((*this)<=x);
+ }
+
+ template <int k>
+ inline bool bigunsignedint<k>::operator>= (const bigunsignedint<k>& x) const
+ {
+ return !((*this)<x);
+ }
+
+
+ template <int k>
+ inline bigunsignedint<k> operator+ (const bigunsignedint<k>& x, std::uintmax_t y)
+ {
+ bigunsignedint<k> temp(y);
+ return x+temp;
+ }
+
+ template <int k>
+ inline bigunsignedint<k> operator- (const bigunsignedint<k>& x, std::uintmax_t y)
+ {
+ bigunsignedint<k> temp(y);
+ return x-temp;
+ }
+
+ template <int k>
+ inline bigunsignedint<k> operator* (const bigunsignedint<k>& x, std::uintmax_t y)
+ {
+ bigunsignedint<k> temp(y);
+ return x*temp;
+ }
+
+ template <int k>
+ inline bigunsignedint<k> operator/ (const bigunsignedint<k>& x, std::uintmax_t y)
+ {
+ bigunsignedint<k> temp(y);
+ return x/temp;
+ }
+
+ template <int k>
+ inline bigunsignedint<k> operator% (const bigunsignedint<k>& x, std::uintmax_t y)
+ {
+ bigunsignedint<k> temp(y);
+ return x%temp;
+ }
+
+ template <int k>
+ inline bigunsignedint<k> operator+ (std::uintmax_t x, const bigunsignedint<k>& y)
+ {
+ bigunsignedint<k> temp(x);
+ return temp+y;
+ }
+
+ template <int k>
+ inline bigunsignedint<k> operator- (std::uintmax_t x, const bigunsignedint<k>& y)
+ {
+ bigunsignedint<k> temp(x);
+ return temp-y;
+ }
+
+ template <int k>
+ inline bigunsignedint<k> operator* (std::uintmax_t x, const bigunsignedint<k>& y)
+ {
+ bigunsignedint<k> temp(x);
+ return temp*y;
+ }
+
+ template <int k>
+ inline bigunsignedint<k> operator/ (std::uintmax_t x, const bigunsignedint<k>& y)
+ {
+ bigunsignedint<k> temp(x);
+ return temp/y;
+ }
+
+ template <int k>
+ inline bigunsignedint<k> operator% (std::uintmax_t x, const bigunsignedint<k>& y)
+ {
+ bigunsignedint<k> temp(x);
+ return temp%y;
+ }
+
+
+ /** @} */
+}
+
+namespace std
+{
+ template<int k>
+ struct numeric_limits<Dune::bigunsignedint<k> >
+ : private Dune::Impl::numeric_limits_helper<Dune::bigunsignedint<k> > // for access to internal state of bigunsignedint
+ {
+ public:
+ static const bool is_specialized = true;
+
+ static Dune::bigunsignedint<k> min()
+ {
+ return static_cast<Dune::bigunsignedint<k> >(0);
+ }
+
+ static Dune::bigunsignedint<k> max()
+ {
+ Dune::bigunsignedint<k> max_;
+ for(std::size_t i=0; i < Dune::bigunsignedint<k>::n; ++i)
+ // access internal state via the helper base class
+ Dune::Impl::numeric_limits_helper<Dune::bigunsignedint<k> >::
+ digit(max_,i)=std::numeric_limits<std::uint16_t>::max();
+ return max_;
+ }
+
+
+ static const int digits = Dune::bigunsignedint<k>::bits *
+ Dune::bigunsignedint<k>::n;
+ static const bool is_signed = false;
+ static const bool is_integer = true;
+ static const bool is_exact = true;
+ static const int radix = 2;
+
+ static Dune::bigunsignedint<k> epsilon()
+ {
+ return static_cast<Dune::bigunsignedint<k> >(0);
+ }
+
+ static Dune::bigunsignedint<k> round_error()
+ {
+ return static_cast<Dune::bigunsignedint<k> >(0);
+ }
+
+ static const int min_exponent = 0;
+ static const int min_exponent10 = 0;
+ static const int max_exponent = 0;
+ static const int max_exponent10 = 0;
+
+ static const bool has_infinity = false;
+ static const bool has_quiet_NaN = false;
+ static const bool has_signaling_NaN = false;
+
+ static const float_denorm_style has_denorm = denorm_absent;
+ static const bool has_denorm_loss = false;
+
+ static Dune::bigunsignedint<k> infinity() noexcept
+ {
+ return static_cast<Dune::bigunsignedint<k> >(0);
+ }
+
+ static Dune::bigunsignedint<k> quiet_NaN() noexcept
+ {
+ return static_cast<Dune::bigunsignedint<k> >(0);
+ }
+
+ static Dune::bigunsignedint<k> signaling_NaN() noexcept
+ {
+ return static_cast<Dune::bigunsignedint<k> >(0);
+ }
+
+ static Dune::bigunsignedint<k> denorm_min() noexcept
+ {
+ return static_cast<Dune::bigunsignedint<k> >(0);
+ }
+
+ static const bool is_iec559 = false;
+ static const bool is_bounded = true;
+ static const bool is_modulo = true;
+
+ static const bool traps = false;
+ static const bool tinyness_before = false;
+ static const float_round_style round_style = round_toward_zero;
+
+ };
+
+}
+
+DUNE_DEFINE_HASH(DUNE_HASH_TEMPLATE_ARGS(int k),DUNE_HASH_TYPE(Dune::bigunsignedint<k>))
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_BINARYFUNCTIONS_HH
+#define DUNE_BINARYFUNCTIONS_HH
+
+/** \file
+ * \brief helper classes to provide unique types for standard functions
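+ *
+ * These structs are plain binary function objects; a minimal usage sketch
+ * (illustrative only):
+ * \code
+ * Dune::Max<double> op;
+ * double m = op(1.0, 2.0); // m == 2.0
+ * \endcode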
+ */
+
+#include <algorithm>
+
+namespace Dune
+{
+ template<typename Type>
+ struct Min
+ {
+ using first_argument_type [[deprecated("This type alias is deprecated following similar deprecations in C++17")]] = Type;
+
+ using second_argument_type [[deprecated("This type alias is deprecated following similar deprecations in C++17")]] = Type;
+
+ using result_type [[deprecated("This type alias is deprecated following similar deprecations in C++17")]] = Type;
+
+ Type operator()(const Type& t1, const Type& t2) const
+ {
+ using std::min;
+ return min(t1,t2);
+ }
+ };
+
+ template<typename Type>
+ struct Max
+ {
+ using first_argument_type [[deprecated("This type alias is deprecated following similar deprecations in C++17")]] = Type;
+
+ using second_argument_type [[deprecated("This type alias is deprecated following similar deprecations in C++17")]] = Type;
+
+ using result_type [[deprecated("This type alias is deprecated following similar deprecations in C++17")]] = Type;
+
+ Type operator()(const Type& t1, const Type& t2) const
+ {
+ using std::max;
+ return max(t1,t2);
+ }
+ };
+}
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_BLOCK_BITFIELD_HH
+#define DUNE_BLOCK_BITFIELD_HH
+
+/** \file
+ \brief Efficient implementation of a dynamic array of static arrays of booleans
+ */
+
+#include <vector>
+#include <bitset>
+#include <iostream>
+#include <algorithm>
+
+#include <dune/common/boundschecking.hh>
+#include <dune/common/genericiterator.hh>
+#include <dune/common/exceptions.hh>
+
+namespace Dune {
+
+ template <int block_size, class Alloc> class BitSetVector;
+ template <int block_size, class Alloc> class BitSetVectorReference;
+
+ /**
+ \brief A proxy class that acts as a const reference to a single
+ bitset in a BitSetVector.
+
+ It contains a conversion to std::bitset and most of the
+ interface of const std::bitset.
+
+ \warning As this is only a proxy class, you can not get the
+ address of the bitset.
+ */
+ template <int block_size, class Alloc>
+ class BitSetVectorConstReference
+ {
+ protected:
+
+ typedef Dune::BitSetVector<block_size, Alloc> BitSetVector;
+ friend class Dune::BitSetVector<block_size, Alloc>;
+
+ BitSetVectorConstReference(const BitSetVector& blockBitField_, int block_number_) :
+ blockBitField(blockBitField_),
+ block_number(block_number_)
+ {
+ DUNE_ASSERT_BOUNDS(blockBitField_.size() > static_cast<size_t>(block_number_));
+ }
+
+ //! hide assignment operator
+ BitSetVectorConstReference& operator=(const BitSetVectorConstReference & b);
+
+ public:
+
+ typedef std::bitset<block_size> bitset;
+
+ // bitset interface typedefs
+ typedef typename std::vector<bool, Alloc>::const_reference reference;
+ typedef typename std::vector<bool, Alloc>::const_reference const_reference;
+ typedef size_t size_type;
+
+ //! Returns a copy of *this shifted left by n bits.
+ bitset operator<<(size_type n) const
+ {
+ bitset b = *this;
+ b <<= n;
+ return b;
+ }
+
+ //! Returns a copy of *this shifted right by n bits.
+ bitset operator>>(size_type n) const
+ {
+ bitset b = *this;
+ b >>= n;
+ return b;
+ }
+
+ //! Returns a copy of *this with all of its bits flipped.
+ bitset operator~() const
+ {
+ bitset b = *this;
+ b.flip();
+ return b;
+ }
+
+ //! Returns block_size.
+ size_type size() const
+ {
+ return block_size;
+ }
+
+ //! Returns the number of bits that are set.
+ size_type count() const
+ {
+ size_type n = 0;
+ for(size_type i=0; i<block_size; ++i)
+ n += getBit(i);
+ return n;
+ }
+
+ //! Returns true if any bits are set.
+ bool any() const
+ {
+ return count();
+ }
+
+ //! Returns true if no bits are set.
+ bool none() const
+ {
+ return ! any();
+ }
+
+ //! Returns true if all bits are set
+ bool all() const
+ {
+ for(size_type i=0; i<block_size; ++i)
+ if(not test(i))
+ return false;
+ return true;
+ }
+
+ //! Returns true if bit n is set.
+ bool test(size_type n) const
+ {
+ return getBit(n);
+ }
+
+ //! Return reference to the `i`-th bit
+ const_reference operator[](size_type i) const
+ {
+ return getBit(i);
+ }
+
+ //! cast to bitset
+ operator bitset() const
+ {
+ return blockBitField.getRepr(block_number);
+ }
+
+ //! Equality of reference and std::bitset
+ bool operator== (const bitset& bs) const
+ {
+ return equals(bs);
+ }
+
+ //! Equality of reference and other reference
+ bool operator== (const BitSetVectorConstReference& bs) const
+ {
+ return equals(bs);
+ }
+
+ //! Inequality of reference and std::bitset
+ bool operator!= (const bitset& bs) const
+ {
+ return ! equals(bs);
+ }
+
+ //! Inequality of reference and other reference
+ bool operator!= (const BitSetVectorConstReference& bs) const
+ {
+ return ! equals(bs);
+ }
+
+ /*!
+ missing operators:
+
+ - unsigned long to_ulong() const
+ */
+
+ friend std::ostream& operator<< (std::ostream& s, const BitSetVectorConstReference& v)
+ {
+ s << "(";
+ for(int i=0; i<block_size; ++i)
+ s << v[i];
+ s << ")";
+ return s;
+ }
+
+ protected:
+ const BitSetVector& blockBitField;
+ int block_number;
+
+ const_reference getBit(size_type i) const
+ {
+ return blockBitField.getBit(block_number,i);
+ }
+
+ template<class BS>
+ bool equals(const BS & bs) const
+ {
+ bool eq = true;
+ for(int i=0; i<block_size; ++i)
+ eq &= (getBit(i) == bs[i]);
+ return eq;
+ }
+
+ private:
+ /**
+ This is only a Proxy class, you can't get the address of the
+ object it references
+ */
+ void operator & () = delete;
+
+ friend class BitSetVectorReference<block_size, Alloc>;
+ };
+
+ /**
+ \brief A proxy class that acts as a mutable reference to a
+ single bitset in a BitSetVector.
+
+ It contains an assignment operator from std::bitset. It
+ inherits the const std::bitset interface provided by
+ BitSetVectorConstReference and adds most of the non-const
+ methods of std::bitset.
+
+ \warning As this is only a proxy class, you can not get the
+ address of the bitset.
+ */
+ template <int block_size, class Alloc>
+ class BitSetVectorReference : public BitSetVectorConstReference<block_size,Alloc>
+ {
+ protected:
+
+ typedef Dune::BitSetVector<block_size, Alloc> BitSetVector;
+ friend class Dune::BitSetVector<block_size, Alloc>;
+
+ typedef Dune::BitSetVectorConstReference<block_size,Alloc> BitSetVectorConstReference;
+
+ BitSetVectorReference(BitSetVector& blockBitField_, int block_number_) :
+ BitSetVectorConstReference(blockBitField_, block_number_),
+ blockBitField(blockBitField_)
+ {}
+
+ public:
+ typedef std::bitset<block_size> bitset;
+
+ //! bitset interface typedefs
+ //! \{
+ //! A proxy class that acts as a reference to a single bit.
+ typedef typename std::vector<bool, Alloc>::reference reference;
+ //! A proxy class that acts as a const reference to a single bit.
+ typedef typename std::vector<bool, Alloc>::const_reference const_reference;
+ //! \}
+
+ //! size_type typedef (an unsigned integral type)
+ typedef size_t size_type;
+
+ //! Assignment from bool, sets each bit in the bitset to b
+ BitSetVectorReference& operator=(bool b)
+ {
+ for(int i=0; i<block_size; ++i)
+ getBit(i) = b;
+ return (*this);
+ }
+
+ //! Assignment from bitset
+ BitSetVectorReference& operator=(const bitset & b)
+ {
+ for(int i=0; i<block_size; ++i)
+ getBit(i) = b.test(i);
+ return (*this);
+ }
+
+ //! Assignment from BitSetVectorConstReference
+ BitSetVectorReference& operator=(const BitSetVectorConstReference & b)
+ {
+ for(int i=0; i<block_size; ++i)
+ getBit(i) = b.test(i);
+ return (*this);
+ }
+
+ //! Assignment from BitSetVectorReference
+ BitSetVectorReference& operator=(const BitSetVectorReference & b)
+ {
+ for(int i=0; i<block_size; ++i)
+ getBit(i) = b.test(i);
+ return (*this);
+ }
+
+ //! Bitwise and (for bitset).
+ BitSetVectorReference& operator&=(const bitset& x)
+ {
+ for (size_type i=0; i<block_size; i++)
+ getBit(i) = (test(i) & x.test(i));
+ return *this;
+ }
+
+ //! Bitwise and (for BitSetVectorConstReference and BitSetVectorReference)
+ BitSetVectorReference& operator&=(const BitSetVectorConstReference& x)
+ {
+ for (size_type i=0; i<block_size; i++)
+ getBit(i) = (test(i) & x.test(i));
+ return *this;
+ }
+
+ //! Bitwise inclusive or (for bitset)
+ BitSetVectorReference& operator|=(const bitset& x)
+ {
+ for (size_type i=0; i<block_size; i++)
+ getBit(i) = (test(i) | x.test(i));
+ return *this;
+ }
+
+ //! Bitwise inclusive or (for BitSetVectorConstReference and BitSetVectorReference)
+ BitSetVectorReference& operator|=(const BitSetVectorConstReference& x)
+ {
+ for (size_type i=0; i<block_size; i++)
+ getBit(i) = (test(i) | x.test(i));
+ return *this;
+ }
+
+ //! Bitwise exclusive or (for bitset).
+ BitSetVectorReference& operator^=(const bitset& x)
+ {
+ for (size_type i=0; i<block_size; i++)
+ getBit(i) = (test(i) ^ x.test(i));
+ return *this;
+ }
+
+ private:
+
+ // For some reason, the following variant of operator^= triggers an ICE or a hanging
+ // compiler on Debian 9 with GCC 6.3 and full optimizations enabled (-O3).
+ // The only way to reliably avoid the issue is by making sure that the compiler does not
+ // see the XOR in the context of the function, so here is a little helper that will normally
+ // be inlined, but not on the broken compiler. This incurs a substantial overhead (a function
+ // call), but until someone else has a better idea, it's the only way to make it work reliably.
+
+ static bool xor_helper(bool a, bool b)
+#if defined(__GNUC__) && ! defined(__clang__) && __GNUC__ == 6 && __GNUC_MINOR__ == 3 && __cplusplus \
+ == 201402L
+ __attribute__((noinline))
+#endif
+ ;
+
+ public:
+
+ //! Bitwise exclusive or (for BitSetVectorConstReference and BitSetVectorReference)
+ BitSetVectorReference& operator^=(const BitSetVectorConstReference& x)
+ {
+ // This uses the helper from above to hoist the actual XOR computation out of the function for
+ // the buggy version of GCC.
+ for (size_type i=0; i<block_size; i++)
+ getBit(i) = xor_helper(test(i),x.test(i));
+ return *this;
+ }
+
+ //! Left shift.
+ BitSetVectorReference& operator<<=(size_type n)
+ {
+ for (size_type i=0; i<block_size-n; i++)
+ getBit(i) = test(i+n);
+ return *this;
+ }
+
+ //! Right shift.
+ BitSetVectorReference& operator>>=(size_type n)
+ {
+ for (size_type i=0; i<block_size-n; i++)
+ getBit(i+n) = test(i);
+ return *this;
+ }
+
+ //! Sets every bit.
+ BitSetVectorReference& set()
+ {
+ for (size_type i=0; i<block_size; i++)
+ set(i);
+ return *this;
+ }
+
+ //! Flips the value of every bit.
+ BitSetVectorReference& flip()
+ {
+ for (size_type i=0; i<block_size; i++)
+ flip(i);
+ return *this;
+ }
+
+ //! Clears every bit.
+ BitSetVectorReference& reset()
+ {
+ *this = false;
+ return *this;
+ }
+
+ //! Sets bit n if val is nonzero, and clears bit n if val is zero.
+ BitSetVectorReference& set(size_type n, int val = 1)
+ {
+ getBit(n) = val;
+ return *this;
+ }
+
+ //! Clears bit n.
+ BitSetVectorReference& reset(size_type n)
+ {
+ set(n, false);
+ return *this;
+ }
+
+ //! Flips bit n.
+ BitSetVectorReference& flip(size_type n)
+ {
+ getBit(n).flip();
+ return *this;
+ }
+
+ using BitSetVectorConstReference::test;
+ using BitSetVectorConstReference::operator[];
+
+ //! Return reference to the `i`-th bit
+ reference operator[](size_type i)
+ {
+ return getBit(i);
+ }
+
+ protected:
+ BitSetVector& blockBitField;
+
+ using BitSetVectorConstReference::getBit;
+
+ reference getBit(size_type i)
+ {
+ return blockBitField.getBit(this->block_number,i);
+ }
+ };
+
+ // implementation of helper - I put it into the template to avoid having
+ // to compile it in a dedicated compilation unit
+ template<int block_size, class Alloc>
+ bool BitSetVectorReference<block_size,Alloc>::xor_helper(bool a, bool b)
+ {
+ return a ^ b;
+ }
+
+ /**
+ typetraits for BitSetVectorReference
+ */
+ template<int block_size, class Alloc>
+ struct const_reference< BitSetVectorReference<block_size,Alloc> >
+ {
+ typedef BitSetVectorConstReference<block_size,Alloc> type;
+ };
+
+ template<int block_size, class Alloc>
+ struct const_reference< BitSetVectorConstReference<block_size,Alloc> >
+ {
+ typedef BitSetVectorConstReference<block_size,Alloc> type;
+ };
+
+ template<int block_size, class Alloc>
+ struct mutable_reference< BitSetVectorReference<block_size,Alloc> >
+ {
+ typedef BitSetVectorReference<block_size,Alloc> type;
+ };
+
+ template<int block_size, class Alloc>
+ struct mutable_reference< BitSetVectorConstReference<block_size,Alloc> >
+ {
+ typedef BitSetVectorReference<block_size,Alloc> type;
+ };
+
+ /**
+ \brief A dynamic %array of blocks of booleans
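+
+ A brief usage sketch (illustrative only):
+ \code
+ Dune::BitSetVector<2> flags(3, false);  // three blocks of two bits each
+ flags[1][0] = true;                     // set bit 0 of block 1
+ std::bitset<2> copy = flags[1];         // blocks convert to std::bitset
+ \endcode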
+ */
+ template <int block_size, class Allocator=std::allocator<bool> >
+ class BitSetVector : private std::vector<bool, Allocator>
+ {
+ /** \brief The implementation class: an unblocked bitfield */
+ typedef std::vector<bool, Allocator> BlocklessBaseClass;
+
+ public:
+ //! container interface typedefs
+ //! \{
+
+ /** \brief Type of the values stored by the container */
+ typedef std::bitset<block_size> value_type;
+
+ /** \brief Reference to a small block of bits */
+ typedef BitSetVectorReference<block_size,Allocator> reference;
+
+ /** \brief Const reference to a small block of bits */
+ typedef BitSetVectorConstReference<block_size,Allocator> const_reference;
+
+ /** \brief Pointer to a small block of bits */
+ typedef BitSetVectorReference<block_size,Allocator>* pointer;
+
+ /** \brief Const pointer to a small block of bits */
+ typedef BitSetVectorConstReference<block_size,Allocator>* const_pointer;
+
+ /** \brief size type */
+ typedef typename std::vector<bool, Allocator>::size_type size_type;
+
+ /** \brief The type of the allocator */
+ typedef Allocator allocator_type;
+ //! \}
+
+ //! iterators
+ //! \{
+ typedef Dune::GenericIterator<BitSetVector<block_size,Allocator>, value_type, reference, std::ptrdiff_t, ForwardIteratorFacade> iterator;
+ typedef Dune::GenericIterator<const BitSetVector<block_size,Allocator>, const value_type, const_reference, std::ptrdiff_t, ForwardIteratorFacade> const_iterator;
+ //! \}
+
+ //! Returns an iterator pointing to the beginning of the vector.
+ iterator begin(){
+ return iterator(*this, 0);
+ }
+
+ //! Returns a const_iterator pointing to the beginning of the vector.
+ const_iterator begin() const {
+ return const_iterator(*this, 0);
+ }
+
+ //! Returns an iterator pointing to the end of the vector.
+ iterator end(){
+ return iterator(*this, size());
+ }
+
+ //! Returns a const_iterator pointing to the end of the vector.
+ const_iterator end() const {
+ return const_iterator(*this, size());
+ }
+
+ //! Default constructor
+ BitSetVector() :
+ BlocklessBaseClass()
+ {}
+
+ //! Construction from an unblocked bitfield
+ BitSetVector(const BlocklessBaseClass& blocklessBitField) :
+ BlocklessBaseClass(blocklessBitField)
+ {
+ if (blocklessBitField.size()%block_size != 0)
+ DUNE_THROW(RangeError, "Vector size is not a multiple of the block size!");
+ }
+
+ /** Constructor with a given length
+ \param n Number of blocks
+ */
+ explicit BitSetVector(int n) :
+ BlocklessBaseClass(n*block_size)
+ {}
+
+ //! Constructor which initializes the field with true or false
+ BitSetVector(int n, bool v) :
+ BlocklessBaseClass(n*block_size,v)
+ {}
+
+ //! Erases all of the elements.
+ void clear()
+ {
+ BlocklessBaseClass::clear();
+ }
+
+ //! Resize field
+ void resize(int n, bool v = bool())
+ {
+ BlocklessBaseClass::resize(n*block_size, v);
+ }
+
+ /** \brief Return the number of blocks */
+ size_type size() const
+ {
+ return BlocklessBaseClass::size()/block_size;
+ }
+
+ //! Sets all entries to <tt> true </tt>
+ void setAll() {
+ this->assign(BlocklessBaseClass::size(), true);
+ }
+
+ //! Sets all entries to <tt> false </tt>
+ void unsetAll() {
+ this->assign(BlocklessBaseClass::size(), false);
+ }
+
+ /** \brief Return reference to i-th block */
+ reference operator[](int i)
+ {
+ return reference(*this, i);
+ }
+
+ /** \brief Return const reference to i-th block */
+ const_reference operator[](int i) const
+ {
+ return const_reference(*this, i);
+ }
+
+ /** \brief Return reference to last block */
+ reference back()
+ {
+ return reference(*this, size()-1);
+ }
+
+ /** \brief Return const reference to last block */
+ const_reference back() const
+ {
+ return const_reference(*this, size()-1);
+ }
+
+ //! Returns the number of bits that are set.
+ size_type count() const
+ {
+ return std::count(BlocklessBaseClass::begin(), BlocklessBaseClass::end(), true);
+ }
+
+ //! Returns the number of set bits when each block is masked with 1<<j
+ size_type countmasked(int j) const
+ {
+ size_type n = 0;
+ size_type blocks = size();
+ for(size_type i=0; i<blocks; ++i)
+ n += getBit(i,j);
+ return n;
+ }
+
+ //! Send bitfield to an output stream
+ friend std::ostream& operator<< (std::ostream& s, const BitSetVector& v)
+ {
+ for (size_t i=0; i<v.size(); i++)
+ s << v[i] << " ";
+ return s;
+ }
+
+ private:
+
+ //! Get a representation as value_type
+ value_type getRepr(int i) const
+ {
+ value_type bits;
+ for(int j=0; j<block_size; ++j)
+ bits.set(j, getBit(i,j));
+ return bits;
+ }
+
+ typename std::vector<bool>::reference getBit(size_type i, size_type j) {
+ DUNE_ASSERT_BOUNDS(j < block_size);
+ DUNE_ASSERT_BOUNDS(i < size());
+ return BlocklessBaseClass::operator[](i*block_size+j);
+ }
+
+ typename std::vector<bool>::const_reference getBit(size_type i, size_type j) const {
+ DUNE_ASSERT_BOUNDS(j < block_size);
+ DUNE_ASSERT_BOUNDS(i < size());
+ return BlocklessBaseClass::operator[](i*block_size+j);
+ }
+
+ friend class BitSetVectorReference<block_size,Allocator>;
+ friend class BitSetVectorConstReference<block_size,Allocator>;
+ };
+
+} // namespace Dune
+
+#endif
--- /dev/null
+#ifndef DUNE_BOUNDSCHECKING_HH
+#define DUNE_BOUNDSCHECKING_HH
+
+#include <dune/common/exceptions.hh>
+
+/**
+ * \file
+ * \brief Macro for wrapping boundary checks
+ */
+
+/**
+ * @addtogroup Common
+ *
+ * @{
+ */
+
+#ifndef DUNE_ASSERT_BOUNDS
+#if defined(DUNE_CHECK_BOUNDS) || defined(DOXYGEN)
+
+/**
+ * \brief If `DUNE_CHECK_BOUNDS` is defined: check if condition
+ * \a cond holds; otherwise, do nothing.
+ *
+ * Meant to be used for conditions that ensure reads and writes
+ * do not occur outside of memory limits or pre-defined patterns,
+ * and for related checks.
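+ *
+ * A typical use, as found in the containers of this module:
+ * \code
+ * DUNE_ASSERT_BOUNDS(i < size());
+ * \endcode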
+ */
+#define DUNE_ASSERT_BOUNDS(cond) \
+ do { \
+ if (!(cond)) \
+ DUNE_THROW(Dune::RangeError, "Index out of bounds."); \
+ } while (false)
+
+#else
+#define DUNE_ASSERT_BOUNDS(cond)
+#endif
+#endif
+
+/* @} */
+
+#endif // DUNE_BOUNDSCHECKING_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_CLASSNAME_HH
+#define DUNE_CLASSNAME_HH
+
+/** \file
+ * \brief A free function to provide the demangled class name
+ * of a given object or type as a string
+ */
+
+#include <cstdlib>
+#include <memory>
+#include <string>
+#include <typeinfo>
+#include <type_traits>
+
+#if __has_include(<cxxabi.h>) && !DISABLE_CXA_DEMANGLE
+#define HAVE_CXA_DEMANGLE 1
+#include <cxxabi.h>
+#endif // #if HAVE_CXA_DEMANGLE
+
+namespace Dune {
+
+ namespace Impl {
+
+ inline std::string demangle(std::string name)
+ {
+#if HAVE_CXA_DEMANGLE
+ int status;
+ std::unique_ptr<char, void(*)(void*)>
+ demangled(abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status),
+ std::free);
+ if( demangled )
+ name = demangled.get();
+#endif // #if HAVE_CXA_DEMANGLE
+ return name;
+ }
+ }
+
+ /** \brief Provide the demangled class name of a type T as a string
+ *
+ * \ingroup CxxUtilities
+ */
+ template <class T>
+ std::string className ()
+ {
+ typedef typename std::remove_reference<T>::type TR;
+ std::string className = Impl::demangle( typeid( TR ).name() );
+ if (std::is_const<TR>::value)
+ className += " const";
+ if (std::is_volatile<TR>::value)
+ className += " volatile";
+ if (std::is_lvalue_reference<T>::value)
+ className += "&";
+ else if (std::is_rvalue_reference<T>::value)
+ className += "&&";
+ return className;
+ }
+
+ /** \brief Provide the demangled class name of a given object as a string
+ *
+ * \ingroup CxxUtilities
+ */
+ template <class T>
+ std::string className ( T&& v)
+ {
+ typedef typename std::remove_reference<T>::type TR;
+ std::string className = Impl::demangle( typeid(v).name() );
+ if (std::is_const<TR>::value)
+ className += " const";
+ if (std::is_volatile<TR>::value)
+ className += " volatile";
+ return className;
+ }
+} // namespace Dune
+
+#endif // DUNE_CLASSNAME_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_CONCEPT_HH
+#define DUNE_COMMON_CONCEPT_HH
+
+#include <type_traits>
+#include <utility>
+#include <tuple>
+
+#include <dune/common/typeutilities.hh>
+#include <dune/common/typelist.hh>
+#include <dune/common/tupleutility.hh>
+#include <dune/common/std/type_traits.hh>
+
+/**
+ * \file
+ *
+ * \brief Infrastructure for concepts.
+ */
+
+namespace Dune {
+
+/**
+ * \brief Namespace for concepts
+ *
+ * This namespace contains helper functions for
+ * concept definitions and the concept definitions
+ * themselves.
+ *
+ * \ingroup CxxConcepts
+ */
+namespace Concept {
+
+
+
+/**
+ * \brief Base class for refined concepts.
+ *
+ * If a new concept should refine one or more existing concepts,
+ * this can be achieved by deriving the new concept from
+ * Refines<C1,...,CN> where C1, ..., CN are the concepts
+ * to be refined. If you want to refine several concepts
+ * they should all be put in a single Refines<...> base
+ * class.
+ *
+ * \tparam BaseConcepts The list of concepts to be refined.
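+ *
+ * A hypothetical sketch (`HasSize` stands for some concept defined elsewhere):
+ * \code
+ * struct Resizeable : Refines<HasSize>
+ * {
+ *   template<class V>
+ *   auto require(V&& v) -> decltype(v.resize(0u));
+ * };
+ * \endcode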
+ *
+ * \ingroup CxxConcepts
+ */
+template<class... BaseConcepts>
+struct Refines
+{
+ typedef TypeList<BaseConcepts...> BaseConceptList;
+};
+
+
+#ifndef DOXYGEN
+
+namespace Impl {
+
+ // #############################################################################
+ // # All functions following here are implementation details
+ // # for the models() function below.
+ // #############################################################################
+
+ // Forward declaration
+ template<class C, class... T>
+ constexpr bool models();
+
+
+
+ // Here is the implementation of the concept checking.
+ // The first two overloads do the magic for checking
+ // if the requirements of a concept are satisfied.
+ // The rest is just for checking base concepts in case
+ // of refinement.
+
+ // This overload is present if type substitution for
+ // C::require(T...) is successful, i.e., if the T...
+ // matches the requirement of C. In this case this
+ // overload is selected because PriorityTag<1>
+ // is a better match for PriorityTag<42> than
+ // PriorityTag<0> in the default overload.
+ template<class C, class... T,
+ decltype(std::declval<C>().require(std::declval<T>()...), 0) =0>
+ constexpr std::true_type matchesRequirement(PriorityTag<1>)
+ { return {}; }
+
+ // If the above overload is ruled out by SFINAE because
+ // the T... does not match the requirements of C, then
+ // this default overload drops in.
+ template<class C, class... T>
+ constexpr std::false_type matchesRequirement(PriorityTag<0>)
+ { return {}; }
+
+
+
+ // An empty list C of concepts is always matched by T...
+ template<class...T>
+ constexpr bool modelsConceptList(TypeList<>)
+ { return true; }
+
+ // A nonempty list C0,..,CN of concepts is modeled
+ // by T... if it models the concept C0
+ // and all concepts in the list C1,..,CN.
+ template<class...T, class C0, class... CC>
+ constexpr bool modelsConceptList(TypeList<C0, CC...>)
+ { return models<C0, T...>() and modelsConceptList<T...>(TypeList<CC...>()); }
+
+
+
+ // If C is an unrefined concept, then T... models C
+ // if it matches the requirement of C.
+ template<class C, class... T>
+ constexpr bool modelsConcept(PriorityTag<0>)
+ { return matchesRequirement<C, T...>(PriorityTag<42>()); }
+
+ // If C is a refined concept, then T... models C
+ // if it matches the requirement of C and of
+ // all base concepts.
+ //
+ // This overload is used if C::BaseConceptList exists
+ // due to its higher priority.
+ template<class C, class... T,
+ decltype(typename C::BaseConceptList(), 0) = 0>
+ constexpr bool modelsConcept(PriorityTag<1>)
+ { return matchesRequirement<C, T...>(PriorityTag<42>()) and modelsConceptList<T...>(typename C::BaseConceptList()); }
+
+ // This is the full concept check. It's defined here in the
+ // implementation namespace with 'constexpr bool' return type
+ // because we need a forward declaration in order to use it
+ // internally above.
+ //
+ // The actual interface function can then call this one and
+ // return the result as std::integral_constant<bool,*> which
+ // does not allow for a forward declaration because the return
+ // type is deduced.
+ template<class C, class... T>
+ constexpr bool models()
+ {
+ return modelsConcept<C, T...>(PriorityTag<42>());
+ }
+
+} // namespace Dune::Concept::Impl
+
+#endif // DOXYGEN
+
+} // namespace Dune::Concept
+
+
+
+/**
+ * \brief Check if concept is modeled by given types
+ *
+ * This will check if the given concept is modeled by the given
+ * list of types. This is true if the list of types models all
+ * the base concepts that are refined by the given concept
+ * and if it satisfies all additional requirements of the latter.
+ *
+ * Notice that a concept may be defined for a list of interacting types.
+ * The function will check if the given list of types matches the requirements
+ * on the whole list. It does not check if each individual type in the list
+ * satisfies the concept.
+ *
+ * This concept check mechanism is inspired by the concept checking
+ * facility in Eric Niebler's range-v3. For more information please
+ * refer to the library's project page https://github.com/ericniebler/range-v3
+ * or this blog entry: http://ericniebler.com/2013/11/23/concept-checking-in-c11.
+ * In fact the interface provided here is almost exactly the same as in range-v3.
+ * However, the implementation differs, because range-v3 uses its own meta-programming
+ * library whereas our implementation is more straightforward.
+ *
+ * The result is returned as std::integral_constant<bool, ...> which
+ * allows this method to be used conveniently with Hybrid::ifElse.
+ *
+ * \tparam C The concept to check
+ * \tparam T The list of types to check against the concept
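+ *
+ * A minimal sketch (the concept `HasSize` is made up for this illustration):
+ * \code
+ * struct HasSize
+ * {
+ *   template<class T>
+ *   auto require(const T& t) -> decltype(t.size());
+ * };
+ * static_assert(Dune::models<HasSize, std::vector<int>>(), "vector should provide size()");
+ * \endcode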
+ *
+ * \ingroup CxxConcepts
+ */
+template<class C, class... T>
+constexpr auto models()
+{
+ return Std::bool_constant<Concept::Impl::models<C, T...>()>();
+}
+
+
+
+namespace Concept {
+
+#ifndef DOXYGEN
+
+namespace Impl {
+
+ // #############################################################################
+ // # All functions following here are implementation details
+ // # for the tupleEntriesModel() function below.
+ // #############################################################################
+
+ template<class C, class Tuple>
+ struct TupleEntriesModelHelper
+ {
+ template<class Accumulated, class T>
+ struct AccumulateFunctor
+ {
+ using type = typename std::integral_constant<bool, Accumulated::value and models<C, T>()>;
+ };
+ using Result = typename ReduceTuple<AccumulateFunctor, Tuple, std::true_type>::type;
+ };
+
+} // namespace Dune::Concept::Impl
+
+#endif // DOXYGEN
+
+
+// #############################################################################
+// # The method tupleEntriesModel() checks whether the types in a tuple
+// # model a concept, using the implementation details above.
+// #############################################################################
+
+template<class C, class Tuple>
+constexpr auto tupleEntriesModel()
+ -> typename Impl::TupleEntriesModelHelper<C, Tuple>::Result
+{
+ return {};
+}
+
+// #############################################################################
+// # The following require*() functions are just helpers that allow
+// # propagating a failed check as a substitution failure. This is useful
+// # inside a concept definition.
+// #############################################################################
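+
+// An illustrative sketch of how these helpers are typically combined inside a
+// concept's require() method (the concept name below is made up):
+//
+//   struct Vectorish
+//   {
+//     template<class V>
+//     auto require(V&& v) -> decltype(
+//       requireConvertible<std::size_t>(v.size()),
+//       requireType<typename std::decay_t<V>::value_type>()
+//     );
+//   };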
+
+// Helper function for use in concept definitions.
+// If the passed value b is not true, the concept will not be satisfied.
+template<bool b, typename std::enable_if<b, int>::type = 0>
+constexpr bool requireTrue()
+{
+ return true;
+}
+
+// Helper function for use in concept definitions.
+template<class C, class... T, typename std::enable_if<models<C, T...>(), int>::type = 0>
+constexpr bool requireConcept()
+{
+ return true;
+}
+
+// Helper function for use in concept definitions.
+// This allows avoiding the use of decltype
+template<class C, class... T, typename std::enable_if<models<C, T...>(), int>::type = 0>
+constexpr bool requireConcept(T&&... /*t*/)
+{
+ return true;
+}
+
+// Helper function for use in concept definitions.
+// This checks if the concept given as first type is modelled by all types in the tuple passed as argument
+template<class C, class Tuple, typename std::enable_if<tupleEntriesModel<C, Tuple>(), int>::type = 0>
+constexpr bool requireConceptForTupleEntries()
+{
+ return true;
+}
+
+// Helper function for use in concept definitions.
+// If the first passed type is not convertible to the second, the concept will not be satisfied.
+template<class From, class To,
+ typename std::enable_if< std::is_convertible<From, To>::value, int>::type = 0>
+constexpr bool requireConvertible()
+{
+ return true;
+}
+
+// Helper function for use in concept definitions.
+// If the passed argument is not convertible to the first passed type, the concept will not be satisfied.
+template<class To, class From,
+ typename std::enable_if< std::is_convertible<From, To>::value, int>::type = 0>
+constexpr bool requireConvertible(const From&)
+{
+ return true;
+}
+
+// Helper function for use in concept definitions.
+// This will always evaluate to true. It merely allows turning
+// a type into an expression. The failure happens already during
+// substitution for the type argument.
+template<typename T>
+constexpr bool requireType()
+{
+ return true;
+}
+
+// Helper function for use in concept definitions.
+// If the first passed type is not a base class of the second type, the concept will not be satisfied.
+template<class Base, class Derived,
+ typename std::enable_if< std::is_base_of<Base, Derived>::value, int>::type = 0>
+constexpr bool requireBaseOf()
+{
+ return true;
+}
+
+// Helper function for use in concept definitions.
+// If the first passed type is not a base class of the argument's type, the concept will not be satisfied.
+template<class Base, class Derived,
+ typename std::enable_if< std::is_base_of<Base, Derived>::value, int>::type = 0>
+constexpr bool requireBaseOf(const Derived&)
+{
+ return true;
+}
+
+// Helper function for use in concept definitions.
+// If the passed types are not the same, the concept will not be satisfied.
+template<class A, class B,
+ typename std::enable_if< std::is_same<A, B>::value, int>::type = 0>
+constexpr bool requireSameType()
+{
+ return true;
+}
+
+
+
+} // namespace Dune::Concept
+
+ /** @} */
+
+} // namespace Dune
+
+
+
+
+#endif // DUNE_COMMON_CONCEPT_HH
--- /dev/null
+#ifndef DUNE_COMMON_CONDITIONAL_HH
+#define DUNE_COMMON_CONDITIONAL_HH
+
+namespace Dune
+{
+
+ /** \brief conditional evaluate
+
+ sometimes called an "immediate if"; it evaluates to
+
+ \code
+ if (b)
+ return v1;
+ else
+ return v2;
+ \endcode
+
+ In contrast to if-then-else the cond function can also be
+ evaluated for vector valued SIMD data types, see simd.hh.
+
+ \param b boolean value
+ \param v1 value returned if b==true
+ \param v2 value returned if b==false
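+
+ A usage sketch (a and b are assumed to be previously defined doubles):
+ \code
+ double larger = Dune::cond(a > b, a, b);
+ \endcode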
+ */
+ template<typename T1, typename T2>
+ const T1 cond(bool b, const T1 & v1, const T2 & v2)
+ {
+ return (b ? v1 : v2);
+ }
+
+} // end namespace Dune
+
+#endif // DUNE_COMMON_CONDITIONAL_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#include <config.h>
+
+#include <cstddef>
+#include <cstdlib>
+#include <functional>
+#include <ios>
+#include <iostream>
+#include <utility>
+
+#include <dune/common/debugalign.hh>
+
+namespace Dune {
+
+ //! default alignment violation handler
+ /**
+ * Prints its arguments on `stderr` and aborts.
+ */
+ static void defaultViolatedAlignment(const char *className,
+ std::size_t expectedAlignment,
+ const void *address)
+ {
+ std::cerr << "Error: Detected invalid alignment for type " << className
+ << ": Address " << address << " not aligned to 0x" << std::hex
+ << expectedAlignment << std::endl;
+ std::abort();
+ }
+
+ ViolatedAlignmentHandler &violatedAlignmentHandler()
+ {
+ static ViolatedAlignmentHandler handler = defaultViolatedAlignment;
+ return handler;
+ }
+
+ void violatedAlignment(const char *className, std::size_t expectedAlignment,
+ const void *address)
+ {
+ const auto &handler = violatedAlignmentHandler();
+ if(handler)
+ handler(className, expectedAlignment, address);
+ }
+
+} // namespace Dune
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_DEBUGALIGN_HH
+#define DUNE_DEBUGALIGN_HH
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <complex>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib> // abs
+#include <functional>
+#include <istream>
+#include <ostream>
+#include <type_traits>
+#include <utility>
+
+#include <dune/common/classname.hh>
+#include <dune/common/indices.hh>
+#include <dune/common/simd/base.hh>
+#include <dune/common/simd/defaults.hh>
+#include <dune/common/typetraits.hh>
+
+namespace Dune {
+
+ //! type of the handler called by `violatedAlignment()`
+ using ViolatedAlignmentHandler =
+ std::function<void(const char*, std::size_t, const void*)>;
+
+ //! access the handler called by `violatedAlignment()`
+ /**
+ * This may be used to obtain the handler for the purpose of calling, or for
+ * saving it somewhere to restore it later. It may also be used to set the
+ * handler simply by assigning a new handler. Setting the handler races
+ * with other accesses.
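+ *
+ * A sketch of installing a custom handler (the lambda shown here is purely
+ * illustrative):
+ * \code
+ * Dune::violatedAlignmentHandler() =
+ *   [](const char* cls, std::size_t expected, const void* addr) {
+ *     std::cerr << cls << " not aligned to " << expected << " at " << addr << std::endl;
+ *   };
+ * \endcode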
+ */
+ ViolatedAlignmentHandler &violatedAlignmentHandler();
+
+ //! called when an alignment violation is detected
+ /**
+ * \param className Name of the class whose alignment was violated
+ * \param expectedAlignment The (over-)alignment that the class expected
+ * \param address The address the class actually found itself at.
+ *
+ * The main purpose of the function is to serve as a convenient breakpoint
+ * for debugging -- which is why we put it in an external compilation unit
+ * so it isn't inlined.
+ */
+ void violatedAlignment(const char *className, std::size_t expectedAlignment,
+ const void *address);
+
+ //! check whether an address conforms to the given alignment
+ inline bool isAligned(const void *p, std::size_t align)
+ {
+ // a more portable way to do this would be to abuse std::align(), but that
+ // isn't supported by g++-4.9 yet
+ return std::uintptr_t(p) % align == 0;
+ }
+
+ //! CRTP base mixin class to check alignment
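+ //!
+ //! A usage sketch (the derived class name is illustrative):
+ //! \code
+ //! struct MyNumber : Dune::AlignedBase<Dune::debugAlignment, MyNumber> { double v; };
+ //! \endcode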
+ template<std::size_t align, class Impl>
+ class alignas(align) AlignedBase
+ {
+ void checkAlignment() const
+ {
+ auto pimpl = static_cast<const Impl*>(this);
+ if(!isAligned(pimpl, align))
+ violatedAlignment(className<Impl>().c_str(), align, pimpl);
+ }
+ public:
+ AlignedBase() { checkAlignment(); }
+ AlignedBase(const AlignedBase &) { checkAlignment(); }
+ AlignedBase(AlignedBase &&) { checkAlignment(); }
+ ~AlignedBase() { checkAlignment(); }
+
+ AlignedBase& operator=(const AlignedBase &) = default;
+ AlignedBase& operator=(AlignedBase &&) = default;
+ };
+
+ //! an alignment large enough to trigger alignment errors
+ static constexpr auto debugAlignment = 2*alignof(std::max_align_t);
+
+ namespace AlignedNumberImpl {
+
+ template<class T, std::size_t align = debugAlignment>
+ class AlignedNumber;
+
+ } // namespace AlignedNumberImpl
+
+ using AlignedNumberImpl::AlignedNumber;
+
+ //! align a value to a certain alignment
+ template<std::size_t align = debugAlignment, class T>
+ AlignedNumber<T, align> aligned(T value) { return { std::move(value) }; }
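+ // e.g. (illustrative): `auto x = aligned<64>(1.0);` yields an AlignedNumber<double, 64>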
+
+ // The purpose of this namespace is to move the `<cmath>` function overloads
+ // out of namespace `Dune`. This avoids problems where people called
+ // e.g. `sqrt(1.0)` inside the `Dune` namespace, without first doing `using
+ // std::sqrt;`. Without any `Dune::sqrt()`, such a use will find
+ // `::sqrt()`, but with `Dune::sqrt()` it will find only `Dune::sqrt()`,
+ // which does not have an overload for `double`.
+ namespace AlignedNumberImpl {
+
+ //! aligned wrappers for arithmetic types
+ template<class T, std::size_t align>
+ class AlignedNumber
+ : public AlignedBase<align, AlignedNumber<T, align> >
+ {
+ T value_;
+
+ public:
+ AlignedNumber() = default;
+ AlignedNumber(T value) : value_(std::move(value)) {}
+ template<class U, std::size_t uAlign,
+ class = std::enable_if_t<(align >= uAlign) &&
+ std::is_convertible<U, T>::value> >
+ AlignedNumber(const AlignedNumber<U, uAlign> &o) : value_(U(o)) {}
+
+ // accessors
+ template<class U,
+ class = std::enable_if_t<std::is_convertible<T, U>::value> >
+ explicit operator U() const { return value_; }
+
+ const T &value() const { return value_; }
+ T &value() { return value_; }
+
+ // I/O
+ template<class charT, class Traits>
+ friend std::basic_istream<charT, Traits>&
+ operator>>(std::basic_istream<charT, Traits>& str, AlignedNumber &u)
+ {
+ return str >> u.value_;
+ }
+
+ template<class charT, class Traits>
+ friend std::basic_ostream<charT, Traits>&
+ operator<<(std::basic_ostream<charT, Traits>& str,
+ const AlignedNumber &u)
+ {
+ return str << u.value_;
+ }
+
+ // The trick with `template<class U = T, class = std::void_t<expr(U)> >` is
+ // needed because at least g++-4.9 seems to evaluate a default argument
+ // in `template<class = std::void_t<expr(T)> >` as soon as possible and will
+ // error out if `expr(T)` is invalid. E.g. for `expr(T)` =
+ // `decltype(--std::declval<T&>())`, instantiating `AlignedNumber<bool>`
+ // will result in an unrecoverable error (`--` cannot be applied to a
+ // `bool`).
+
+ // Increment, decrement
+ template<class U = T, class = std::void_t<decltype(++std::declval<U&>())> >
+ AlignedNumber &operator++() { ++value_; return *this; }
+
+ template<class U = T, class = std::void_t<decltype(--std::declval<U&>())> >
+ AlignedNumber &operator--() { --value_; return *this; }
+
+ template<class U = T, class = std::void_t<decltype(std::declval<U&>()++)> >
+ decltype(auto) operator++(int) { return aligned<align>(value_++); }
+
+ template<class U = T, class = std::void_t<decltype(std::declval<U&>()--)> >
+ decltype(auto) operator--(int) { return aligned<align>(value_--); }
+
+ // unary operators
+ template<class U = T,
+ class = std::void_t<decltype(+std::declval<const U&>())> >
+ decltype(auto) operator+() const { return aligned<align>(+value_); }
+
+ template<class U = T,
+ class = std::void_t<decltype(-std::declval<const U&>())> >
+ decltype(auto) operator-() const { return aligned<align>(-value_); }
+
+ /*
+ * silence warnings from GCC about using `~` on a bool
+ * (when instantiated for T=bool)
+ */
+#if __GNUC__ >= 7
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wbool-operation"
+#endif
+ template<class U = T,
+ class = std::void_t<decltype(~std::declval<const U&>())> >
+ decltype(auto) operator~() const { return aligned<align>(~value_); }
+#if __GNUC__ >= 7
+# pragma GCC diagnostic pop
+#endif
+
+ template<class U = T,
+ class = std::void_t<decltype(!std::declval<const U&>())> >
+ decltype(auto) operator!() const { return aligned<align>(!value_); }
+
+ // assignment operators
+#define DUNE_ASSIGN_OP(OP) \
+ template<class U, std::size_t uAlign, \
+ class = std::enable_if_t< \
+ ( uAlign <= align && \
+ sizeof(std::declval<T&>() OP std::declval<U>()) ) \
+ > > \
+ AlignedNumber &operator OP(const AlignedNumber<U, uAlign> &u) \
+ { \
+ value_ OP U(u); \
+ return *this; \
+ } \
+ \
+ template<class U, \
+ class = std::void_t<decltype(std::declval<T&>() OP \
+ std::declval<U>())> > \
+ AlignedNumber &operator OP(const U &u) \
+ { \
+ value_ OP u; \
+ return *this; \
+ } \
+ \
+ static_assert(true, "Require semicolon to unconfuse editors")
+
+ DUNE_ASSIGN_OP(+=);
+ DUNE_ASSIGN_OP(-=);
+
+ DUNE_ASSIGN_OP(*=);
+ DUNE_ASSIGN_OP(/=);
+ DUNE_ASSIGN_OP(%=);
+
+ DUNE_ASSIGN_OP(^=);
+ DUNE_ASSIGN_OP(&=);
+ DUNE_ASSIGN_OP(|=);
+
+ DUNE_ASSIGN_OP(<<=);
+ DUNE_ASSIGN_OP(>>=);
+
+#undef DUNE_ASSIGN_OP
+ };
+
+ // binary operators
+#define DUNE_BINARY_OP(OP) \
+ template<class T, std::size_t tAlign, class U, std::size_t uAlign, \
+ class = std::void_t<decltype(std::declval<T>() \
+ OP std::declval<U>())> > \
+ decltype(auto) \
+ operator OP(const AlignedNumber<T, tAlign> &t, \
+ const AlignedNumber<U, uAlign> &u) \
+ { \
+ /* can't use std::max(); not constexpr */ \
+ return aligned<(tAlign > uAlign ? tAlign : uAlign)>(T(t) OP U(u)); \
+ } \
+ \
+ template<class T, class U, std::size_t uAlign, \
+ class = std::void_t<decltype(std::declval<T>() \
+ OP std::declval<U>())> > \
+ decltype(auto) \
+ operator OP(const T &t, const AlignedNumber<U, uAlign> &u) \
+ { \
+ return aligned<uAlign>(t OP U(u)); \
+ } \
+ \
+ template<class T, std::size_t tAlign, class U, \
+ class = std::void_t<decltype(std::declval<T>() \
+ OP std::declval<U>())> > \
+ decltype(auto) \
+ operator OP(const AlignedNumber<T, tAlign> &t, const U &u) \
+ { \
+ return aligned<tAlign>(T(t) OP u); \
+ } \
+ \
+ static_assert(true, "Require semicolon to unconfuse editors")
+
+ DUNE_BINARY_OP(+);
+ DUNE_BINARY_OP(-);
+
+ DUNE_BINARY_OP(*);
+ DUNE_BINARY_OP(/);
+ DUNE_BINARY_OP(%);
+
+ DUNE_BINARY_OP(^);
+ DUNE_BINARY_OP(&);
+ DUNE_BINARY_OP(|);
+
+ DUNE_BINARY_OP(<<);
+ DUNE_BINARY_OP(>>);
+
+ DUNE_BINARY_OP(==);
+ DUNE_BINARY_OP(!=);
+ DUNE_BINARY_OP(<);
+ DUNE_BINARY_OP(>);
+ DUNE_BINARY_OP(<=);
+ DUNE_BINARY_OP(>=);
+
+ DUNE_BINARY_OP(&&);
+ DUNE_BINARY_OP(||);
+
+#undef DUNE_BINARY_OP
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Overloads for the functions provided by the standard library
+ //
+#define DUNE_UNARY_FUNC(name) \
+ template<class T, std::size_t align> \
+ decltype(auto) name(const AlignedNumber<T, align> &u) \
+ { \
+ using std::name; \
+ return aligned<align>(name(T(u))); \
+ } \
+ static_assert(true, "Require semicolon to unconfuse editors")
+
+ //
+ // <cmath> functions
+ //
+
+ // note: only unary functions are provided at the moment. Getting all the
+ // overloads right for functions with more than one argument is tricky.
+ // All <cmath> functions appear in the list below in the order they are
+ // listed in the standard, but the unimplemented ones are commented
+ // out.
+
+ // note: abs is provided by both <cstdlib> (for integer) and <cmath> (for
+ // floating point). This overload works for both.
+ DUNE_UNARY_FUNC(abs);
+ DUNE_UNARY_FUNC(acos);
+ DUNE_UNARY_FUNC(acosh);
+ DUNE_UNARY_FUNC(asin);
+ DUNE_UNARY_FUNC(asinh);
+ DUNE_UNARY_FUNC(atan);
+ // atan2
+ DUNE_UNARY_FUNC(atanh);
+ DUNE_UNARY_FUNC(cbrt);
+ DUNE_UNARY_FUNC(ceil);
+ // copysign
+ DUNE_UNARY_FUNC(cos);
+ DUNE_UNARY_FUNC(cosh);
+ DUNE_UNARY_FUNC(erf);
+ DUNE_UNARY_FUNC(erfc);
+ DUNE_UNARY_FUNC(exp);
+ DUNE_UNARY_FUNC(exp2);
+ DUNE_UNARY_FUNC(expm1);
+ DUNE_UNARY_FUNC(fabs);
+ // fdim
+ DUNE_UNARY_FUNC(floor);
+ // fma
+ // fmax
+ // fmin
+ // fmod
+ // frexp
+ // hypot
+ DUNE_UNARY_FUNC(ilogb);
+ // ldexp
+ DUNE_UNARY_FUNC(lgamma);
+ DUNE_UNARY_FUNC(llrint);
+ DUNE_UNARY_FUNC(llround);
+ DUNE_UNARY_FUNC(log);
+ DUNE_UNARY_FUNC(log10);
+ DUNE_UNARY_FUNC(log1p);
+ DUNE_UNARY_FUNC(log2);
+ DUNE_UNARY_FUNC(logb);
+ DUNE_UNARY_FUNC(lrint);
+ DUNE_UNARY_FUNC(lround);
+ // modf
+ DUNE_UNARY_FUNC(nearbyint);
+ // nextafter
+ // nexttoward
+ // pow
+ // remainder
+ // remquo
+ DUNE_UNARY_FUNC(rint);
+ DUNE_UNARY_FUNC(round);
+ // scalbln
+ // scalbn
+ DUNE_UNARY_FUNC(sin);
+ DUNE_UNARY_FUNC(sinh);
+ DUNE_UNARY_FUNC(sqrt);
+ DUNE_UNARY_FUNC(tan);
+ DUNE_UNARY_FUNC(tanh);
+ DUNE_UNARY_FUNC(tgamma);
+ DUNE_UNARY_FUNC(trunc);
+
+ DUNE_UNARY_FUNC(isfinite);
+ DUNE_UNARY_FUNC(isinf);
+ DUNE_UNARY_FUNC(isnan);
+ DUNE_UNARY_FUNC(isnormal);
+ DUNE_UNARY_FUNC(signbit);
+
+ // isgreater
+ // isgreaterequal
+ // isless
+ // islessequal
+ // islessgreater
+ // isunordered
+
+ //
+ // <complex> functions
+ //
+
+ // not all functions are implemented, and unlike for <cmath>, no
+ // comprehensive list is provided
+ DUNE_UNARY_FUNC(real);
+
+#undef DUNE_UNARY_FUNC
+
+ // We need to overload min() and max() since they require types to be
+ // LessThanComparable, which requires `a<b` to be "convertible to bool".
+ // That wording seems to be a leftover from C++03, and today is probably
+ // equivalent to "implicitly convertible". There is also issue 2114
+ // <https://cplusplus.github.io/LWG/issue2114> in the standard (still open
+ // as of 2018-07-06), which strives to require both "implicitly" and
+ // "contextually" convertible -- plus a few other things.
+ //
+ // We do not want our debug type to automatically decay to the underlying
+ // type, so we do not want to make the conversion non-explicit. So the
+ // only option left is to overload min() and max().
+
+ template<class T, std::size_t align>
+ auto max(const AlignedNumber<T, align> &a,
+ const AlignedNumber<T, align> &b)
+ {
+ using std::max;
+ return aligned<align>(max(T(a), T(b)));
+ }
+
+ template<class T, std::size_t align>
+ auto max(const T &a, const AlignedNumber<T, align> &b)
+ {
+ using std::max;
+ return aligned<align>(max(a, T(b)));
+ }
+
+ template<class T, std::size_t align>
+ auto max(const AlignedNumber<T, align> &a, const T &b)
+ {
+ using std::max;
+ return aligned<align>(max(T(a), b));
+ }
+
+ template<class T, std::size_t align>
+ auto min(const AlignedNumber<T, align> &a,
+ const AlignedNumber<T, align> &b)
+ {
+ using std::min;
+ return aligned<align>(min(T(a), T(b)));
+ }
+
+ template<class T, std::size_t align>
+ auto min(const T &a, const AlignedNumber<T, align> &b)
+ {
+ using std::min;
+ return aligned<align>(min(a, T(b)));
+ }
+
+ template<class T, std::size_t align>
+ auto min(const AlignedNumber<T, align> &a, const T &b)
+ {
+ using std::min;
+ return aligned<align>(min(T(a), b));
+ }
+
+ } // namespace AlignedNumberImpl
+
+ // SIMD-like functions from "conditional.hh"
+ template<class T, std::size_t align>
+ AlignedNumber<T, align>
+ cond(const AlignedNumber<bool, align> &b,
+ const AlignedNumber<T, align> &v1, const AlignedNumber<T, align> &v2)
+ {
+ return b ? v1 : v2;
+ }
+
+ // SIMD-like functions from "rangeutilities.hh"
+ template<class T, std::size_t align>
+ T max_value(const AlignedNumber<T, align>& val)
+ {
+ return T(val);
+ }
+
+ template<class T, std::size_t align>
+ T min_value(const AlignedNumber<T, align>& val)
+ {
+ return T(val);
+ }
+
+ template<std::size_t align>
+ bool any_true(const AlignedNumber<bool, align>& val)
+ {
+ return bool(val);
+ }
+
+ template<std::size_t align>
+ bool all_true(const AlignedNumber<bool, align>& val)
+ {
+ return bool(val);
+ }
+
+ // SIMD-like functionality from "simd/interface.hh"
+ namespace Simd {
+ namespace Overloads {
+
+ template<class T, std::size_t align>
+ struct ScalarType<AlignedNumber<T, align> > { using type = T; };
+
+ template<class U, class T, std::size_t align>
+ struct RebindType<U, AlignedNumber<T, align> > {
+ using type = AlignedNumber<U, align>;
+ };
+
+ template<class T, std::size_t align>
+ struct LaneCount<AlignedNumber<T, align> > : index_constant<1> {};
+
+ template<class T, std::size_t align>
+ T& lane(ADLTag<5>, std::size_t l, AlignedNumber<T, align> &v)
+ {
+ assert(l == 0);
+ return v.value();
+ }
+
+ template<class T, std::size_t align>
+ T lane(ADLTag<5>, std::size_t l, const AlignedNumber<T, align> &v)
+ {
+ assert(l == 0);
+ return v.value();
+ }
+
+ template<class T, std::size_t align>
+ const AlignedNumber<T, align> &
+ cond(ADLTag<5>, AlignedNumber<bool, align> mask,
+ const AlignedNumber<T, align> &ifTrue,
+ const AlignedNumber<T, align> &ifFalse)
+ {
+ return mask ? ifTrue : ifFalse;
+ }
+
+ template<std::size_t align>
+ bool anyTrue(ADLTag<5>, const AlignedNumber<bool, align> &mask)
+ {
+ return bool(mask);
+ }
+
+ } // namespace Overloads
+
+ } // namespace Simd
+
+} // namespace Dune
+
+#endif // DUNE_DEBUGALIGN_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "debugallocator.hh"
+
+#if HAVE_MPROTECT
+
+#include <iostream>
+#include <unistd.h>
+#include <cstdlib>
+
+namespace Dune
+{
+ namespace DebugMemory
+ {
+ // system constant for page size
+ const std::ptrdiff_t page_size = sysconf(_SC_PAGESIZE);
+
+ // implement member functions
+ void AllocationManager::allocation_error(const char* msg)
+ {
+ std::cerr << "Abort - Memory Corruption: " << msg << std::endl;
+ std::abort();
+ }
+
+ // global instance of AllocationManager
+ AllocationManager alloc_man;
+
+ } // end namespace DebugMemory
+} // end namespace Dune
+
+#endif // HAVE_MPROTECT
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_DEBUG_ALLOCATOR_HH
+#define DUNE_DEBUG_ALLOCATOR_HH
+
+#if __has_include(<sys/mman.h>)
+
+#include <sys/mman.h>
+#define HAVE_SYS_MMAN_H 1
+#define HAVE_MPROTECT 1
+
+#include <exception>
+#include <typeinfo>
+#include <vector>
+#include <iostream>
+#include <cerrno>
+#include <cstring>
+#include <cstdint>
+#include <cstdlib>
+#include <new>
+
+#include "mallocallocator.hh"
+
+namespace Dune
+{
+
+#ifndef DOXYGEN // hide implementation details from doxygen
+ namespace DebugMemory
+ {
+
+ extern const std::ptrdiff_t page_size;
+
+ struct AllocationManager
+ {
+ typedef std::size_t size_type;
+ typedef std::ptrdiff_t difference_type;
+ typedef void* pointer;
+
+ protected:
+ static void allocation_error(const char* msg);
+
+ struct AllocationInfo;
+ friend struct AllocationInfo;
+
+#define ALLOCATION_ASSERT(A) { if (!(A)) \
+ { allocation_error("Assertion " # A " failed");\
+ }\
+};
+
+ struct AllocationInfo
+ {
+ AllocationInfo(const std::type_info & t) : type(&t) {}
+ const std::type_info * type;
+
+ pointer page_ptr;
+ pointer ptr;
+ size_type pages;
+ size_type capacity;
+ size_type size;
+ bool not_free;
+ };
+
+ typedef MallocAllocator<AllocationInfo> Alloc;
+ typedef std::vector<AllocationInfo, Alloc> AllocationList;
+ AllocationList allocation_list;
+
+ private:
+ void memprotect([[maybe_unused]] void* from,
+ [[maybe_unused]] difference_type len,
+ [[maybe_unused]] int prot)
+ {
+#if HAVE_SYS_MMAN_H && HAVE_MPROTECT
+ int result = mprotect(from, len, prot);
+ if (result == -1)
+ {
+
+          std::cerr << "ERROR: (" << errno << ": " << strerror(errno) << ")" << std::endl;
+ std::cerr << " Failed to ";
+ if (prot == PROT_NONE)
+ std::cerr << "protect ";
+ else
+ std::cerr << "unprotect ";
+ std::cerr << "memory range: "
+ << from << ", "
+ << static_cast<void*>(
+ static_cast<char*>(from) + len)
+ << std::endl;
+ abort();
+ }
+#else
+ std::cerr << "WARNING: memory protection not available" << std::endl;
+#endif
+ }
+
+ public:
+
+ ~AllocationManager ()
+ {
+ AllocationList::iterator it;
+ bool error = false;
+ for (it=allocation_list.begin(); it!=allocation_list.end(); it++)
+ {
+ if (it->not_free)
+ {
+ std::cerr << "ERROR: found memory chunk still in use: " <<
+ it->capacity << " bytes at " << it->ptr << std::endl;
+ error = true;
+ }
+ munmap(it->page_ptr, it->pages * page_size);
+ }
+ if (error)
+ allocation_error("lost allocations");
+ }
+
+ template<typename T>
+ T* allocate(size_type n)
+ {
+ // setup chunk info
+ AllocationInfo ai(typeid(T));
+ ai.size = n;
+ ai.capacity = n * sizeof(T);
+ ai.pages = (ai.capacity) / page_size + 2;
+ ai.not_free = true;
+ size_type overlap = ai.capacity % page_size;
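+        // layout of the mapped region:
+        //   [ partial first page | full data pages | protected guard page ]
+        // the data is placed so that it ends exactly at the start of the last
+        // page, which is write-protected below; any access past the end of the
+        // allocation therefore faults immediately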
+ ai.page_ptr = mmap(NULL, ai.pages * page_size,
+ PROT_READ | PROT_WRITE,
+#ifdef __APPLE__
+ MAP_ANON | MAP_PRIVATE,
+#else
+ MAP_ANONYMOUS | MAP_PRIVATE,
+#endif
+ -1, 0);
+ if (MAP_FAILED == ai.page_ptr)
+ {
+ throw std::bad_alloc();
+ }
+ ai.ptr = static_cast<char*>(ai.page_ptr) + page_size - overlap;
+ // write protect memory behind the actual data
+ memprotect(static_cast<char*>(ai.page_ptr) + (ai.pages-1) * page_size,
+ page_size,
+ PROT_NONE);
+ // remember the chunk
+ allocation_list.push_back(ai);
+ // return the ptr
+ return static_cast<T*>(ai.ptr);
+ }
+
+ template<typename T>
+ void deallocate(T* ptr, size_type n = 0) noexcept
+ {
+ // compute page address
+ void* page_ptr =
+ static_cast<void*>(
+ (char*)(ptr) - ((std::uintptr_t)(ptr) % page_size));
+ // search list
+ AllocationList::iterator it;
+ unsigned int i = 0;
+ for (it=allocation_list.begin(); it!=allocation_list.end(); it++, i++)
+ {
+ if (it->page_ptr == page_ptr)
+ {
+ // std::cout << "found memory_block in allocation " << i << std::endl;
+ // sanity checks
+ if (n != 0)
+ ALLOCATION_ASSERT(n == it->size);
+ ALLOCATION_ASSERT(ptr == it->ptr);
+ ALLOCATION_ASSERT(true == it->not_free);
+ ALLOCATION_ASSERT(typeid(T) == *(it->type));
+ // free memory
+ it->not_free = false;
+#if DEBUG_ALLOCATOR_KEEP
+ // write protect old memory
+ memprotect(it->page_ptr,
+ (it->pages) * page_size,
+ PROT_NONE);
+#else
+ // unprotect old memory
+ memprotect(it->page_ptr,
+ (it->pages) * page_size,
+ PROT_READ | PROT_WRITE);
+ munmap(it->page_ptr, it->pages * page_size);
+ // remove chunk info
+ allocation_list.erase(it);
+#endif
+ return;
+ }
+ }
+ allocation_error("memory block not found");
+ }
+ };
+#undef ALLOCATION_ASSERT
+
+ extern AllocationManager alloc_man;
+ } // end namespace DebugMemory
+#endif // DOXYGEN
+
+ template<class T>
+ class DebugAllocator;
+
+ // specialize for void
+ template <>
+ class DebugAllocator<void> {
+ public:
+ typedef void* pointer;
+ typedef const void* const_pointer;
+    // references to void members are impossible.
+ typedef void value_type;
+ template <class U> struct rebind {
+ typedef DebugAllocator<U> other;
+ };
+ };
+
+ // actual implementation
+ /**
+ @ingroup Allocators
+ @brief Allocators implementation which performs different kind of memory checks
+
+ We check:
+ - access past the end
+ - only free memory which was allocated with this allocator
+ - list allocated memory chunks still in use upon destruction of the allocator
+
+ When defining DEBUG_ALLOCATOR_KEEP to 1, we also check
+ - double free
+ - access after free
+
+ When defining DEBUG_NEW_DELETE >= 1, we
+     - overload new/delete
+ - use the Debug memory management for new/delete
+ - DEBUG_NEW_DELETE > 2 gives extensive debug output
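+
+     A minimal usage sketch (element type and sizes are only an example):
+     \code
+     std::vector<int, Dune::DebugAllocator<int> > v(16);
+     v[15] = 42;    // fine
+     // v[16] = 42; // would touch the write-protected guard page and fault
+     \endcode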
+ */
+ template <class T>
+ class DebugAllocator {
+ public:
+ typedef std::size_t size_type;
+ typedef std::ptrdiff_t difference_type;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T value_type;
+ template <class U> struct rebind {
+ typedef DebugAllocator<U> other;
+ };
+
+ //! create a new DebugAllocator
+ DebugAllocator() noexcept {}
+    //! copy construct from another DebugAllocator, possibly for a different result type
+ template <class U>
+ DebugAllocator(const DebugAllocator<U>&) noexcept {}
+ //! cleanup this allocator
+ ~DebugAllocator() noexcept {}
+
+ pointer address(reference x) const
+ {
+ return &x;
+ }
+ const_pointer address(const_reference x) const
+ {
+ return &x;
+ }
+
+ //! allocate n objects of type T
+ pointer allocate(size_type n,
+ [[maybe_unused]] DebugAllocator<void>::const_pointer hint = 0)
+ {
+ return DebugMemory::alloc_man.allocate<T>(n);
+ }
+
+ //! deallocate n objects of type T at address p
+ void deallocate(pointer p, size_type n)
+ {
+ DebugMemory::alloc_man.deallocate<T>(p,n);
+ }
+
+ //! max size for allocate
+ size_type max_size() const noexcept
+ {
+ return size_type(-1) / sizeof(T);
+ }
+
+ //! copy-construct an object of type T (i.e. make a placement new on p)
+ void construct(pointer p, const T& val)
+ {
+ ::new((void*)p)T(val);
+ }
+
+ //! construct an object of type T from variadic parameters
+ template<typename ... Args>
+ void construct(pointer p, Args&&... args)
+ {
+ ::new((void *)p)T(std::forward<Args>(args) ...);
+ }
+
+ //! destroy an object of type T (i.e. call the destructor)
+ void destroy(pointer p)
+ {
+ p->~T();
+ }
+ };
+
+ //! check whether allocators are equivalent
+ template<class T>
+ constexpr bool
+ operator==(const DebugAllocator<T> &, const DebugAllocator<T> &)
+ {
+ return true;
+ }
+
+ //! check whether allocators are not equivalent
+ template<class T>
+ constexpr bool
+ operator!=(const DebugAllocator<T> &, const DebugAllocator<T> &)
+ {
+ return false;
+ }
+}
+
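+// usage sketch: define DEBUG_NEW_DELETE before this header is included
+// (e.g. compile with -DDEBUG_NEW_DELETE=3 for verbose tracing) to route the
+// global new/delete through the allocation manager above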
+#ifdef DEBUG_NEW_DELETE
+void * operator new(size_t size)
+{
+ // try to allocate size bytes
+ void *p = Dune::DebugMemory::alloc_man.allocate<char>(size);
+#if DEBUG_NEW_DELETE > 2
+ std::cout << "NEW " << size
+ << " -> " << p
+ << std::endl;
+#endif
+ return p;
+}
+
+void operator delete(void * p) noexcept
+{
+#if DEBUG_NEW_DELETE > 2
+ std::cout << "FREE " << p << std::endl;
+#endif
+ Dune::DebugMemory::alloc_man.deallocate<char>(static_cast<char*>(p));
+}
+
+void operator delete(void * p, size_t size) noexcept
+{
+#if DEBUG_NEW_DELETE > 2
+ std::cout << "FREE " << p << std::endl;
+#endif
+ Dune::DebugMemory::alloc_man.deallocate<char>(static_cast<char*>(p), size);
+}
+
+#endif // DEBUG_NEW_DELETE
+
+#endif // __has_include(<sys/mman.h>)
+
+#endif // DUNE_DEBUG_ALLOCATOR_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_DEBUGSTREAM_HH
+#define DUNE_DEBUGSTREAM_HH
+
+/** \file
+ * \brief Defines several output streams for messages of different importance
+ */
+
+#include <iostream>
+#include <stack>
+
+#include <dune/common/exceptions.hh>
+
+namespace Dune {
+
+ /*! \defgroup DebugOut Debug output
+ \ingroup Common
+
+ The debug output is implemented by instances of DebugStream which
+ provides the following features:
+
+ - output-syntax in the standard ostream-notation
+ - output can be totally deactivated depending on template parameters
+ - streams with active output can be deactivated during runtime
+ - redirecting to std::ostream or other DebugStream s during runtime
+ - stack oriented state
+
+ The Dune-components should use the streams explained in \ref StdStreams
+ for output so that applications may redirect the output globally.
+
+ Changes in runtime are provided by three sets of methods:
+
+ - push()/pop() sets new activation flag or restore old setting
+ - attach()/detach() redirects output to a different std::ostream or restore old stream
+ - tie()/untie() redirects output through another DebugStream. If the state of the master stream changes (activation or output-stream) it is changed in the tied stream as well
+
+     The first two pairs of methods implement a full stack, whereas tie() is
+     a bit different: though a tied stream may be (de)activated via
+     push()/pop(), you cannot attach() or detach() an output. You'll need
+     to change the master stream instead.
+
+ \section DebugAppl Applications
+
+ Applications using the Dune-library should create an independent set
+ of DebugStreams so that the debug levels can be changed separately.
+ Example:
+
+ \code
+ static const Dune::DebugLevel APPL_MINLEVEL = 3;
+
+ Dune::DebugStream<1, APPL_MINLEVEL> myverbose;
+ Dune::DebugStream<2, APPL_MINLEVEL> myinfo;
+ Dune::DebugStream<3, APPL_MINLEVEL> mywarn;
+ \endcode
+
+ This code creates three streams of which only the last one really
+ creates output. The output-routines of the other streams vanish in
+ optimized executables.
+
+ You can use the common_bits-Template to switch to a policy using bitflags:
+
+ \code
+ enum { APPL_CORE = 1, APPL_IO = 2, APPL_GRAPHICS = 4};
+
+ static const Dune::DebugLevel APPL_DEBUG_MASK = APPL_CORE | APPL_GRAPHICS;
+ static const Dune::DebugLevel APPL_ACTIVE_MASK = 0xff;
+
+ Dune::DebugStream<APPL_CORE, APPL_DEBUG_MASK, APPL_ACTIVE_MASK, Dune::common_bits> coreout;
+ Dune::DebugStream<APPL_IO, APPL_DEBUG_MASK, APPL_ACTIVE_MASK, Dune::common_bits> ioout;
+ Dune::DebugStream<APPL_GRAPHICS, APPL_DEBUG_MASK, APPL_ACTIVE_MASK, Dune::common_bits> graphout;
+ \endcode
+
+ Applications that wish to redirect the \ref StdStreams through their
+ private streams may use the tie()-mechanism:
+
+ \code
+ // initialize streams like above
+
+ Dune::dwarn.tie(coreout);
+
+ // ... Dune-output to dwarn will be directed through coreout ...
+
+ Dune::dwarn.untie();
+ \endcode
+
+ Keep in mind to untie() a stream before the tied stream is destructed.
+
+ An alternative is to attach() an output stream defined by the application:
+
+ \code
+ std::ofstream mylog("application.log");
+
+ Dune::dwarn.attach(mylog);
+ \endcode
+ */
+ /**
+ \addtogroup DebugOut
+ \{
+ */
+ /*! \file
+
+ This file implements the class DebugStream to support output in a
+ variety of debug levels. Additionally, template parameters control
+ if the output operation is really performed so that unused debug
+ levels can be deactivated
+
+ */
+
+
+ /*! \brief Type for debug levels.
+
+ Only positive values allowed
+ */
+ typedef unsigned int DebugLevel;
+
+ /*!
+
+ \brief Greater or equal template test.
+
+ value is false if current is below the threshold, true otherwise
+
+ This is the default struct to control the activation policy of
+ DebugStream and deactivates output below the threshold
+ */
+ template <DebugLevel current, DebugLevel threshold>
+ struct greater_or_equal {
+ static const bool value = (current >= threshold);
+ };
+
+
+ /*! \brief activate if current and mask have common bits switched on.
+
+ This template implements an alternative strategy to activate or
+ deactivate a DebugStream. Keep in mind to number your streams as
+ powers of two if using this template
+ */
+ template <DebugLevel current, DebugLevel mask>
+ struct common_bits {
+ enum {value = ((current & mask)!=0) };
+ };
+
+
+ //! \brief standard exception for the debugstream
+ class DebugStreamError : public IOError {};
+
+ class StreamWrap {
+ public:
+ StreamWrap(std::ostream& _out) : out(_out) { }
+ std::ostream& out;
+ StreamWrap *next;
+ };
+
+ //! \brief Intermediate class to implement tie-operation of DebugStream
+ class DebugStreamState {
+ // !!! should be protected somehow but that won't be easy
+ public:
+ //! \brief current output stream and link to possibly pushed old output streams
+ StreamWrap* current;
+
+ //! \brief flag to switch output during runtime
+ bool _active;
+
+ //! \brief are we tied to another DebugStream?
+ bool _tied;
+
+ //! \brief how many streams are tied to this state
+ unsigned int _tied_streams;
+ };
+
+ /*!
+ \brief Generic class to implement debug output streams
+
+ The main function of a DebugStream is to provide output in a
+ standard ostream fashion that is fully deactivated if the level of
+ the stream does not meet the current requirements. More information in \ref DebugOut
+
+ \param thislevel this level
+ \param dlevel level needed for any output to happen
+ \param alevel level needed to switch activation flag on
+ \param activator template describing the activation policy
+
+ \todo Fix visibility of internal data
+ */
+ template <DebugLevel thislevel = 1,
+ DebugLevel dlevel = 1,
+ DebugLevel alevel = 1,
+ template<DebugLevel, DebugLevel> class activator = greater_or_equal>
+ class DebugStream : public DebugStreamState {
+ public:
+ /*! \brief Create a DebugStream and set initial output stream
+
+ during runtime another stream can be attach()ed, however the
+ initial stream may not be detach()ed.
+ */
+ DebugStream(std::ostream& out = std::cerr) {
+ // start a new list of streams
+ current = new StreamWrap(out);
+ current->next = 0;
+
+ // check if we are above the default activation level
+ _active = activator<thislevel,alevel>::value;
+
+ // we're not tied to another DebugStream
+ _tied = false;
+
+ // no child streams yet
+ _tied_streams = 0;
+ }
+
+ /*! \brief Create a DebugStream and directly tie to another DebugStream
+
+ The fallback is used if a DebugStream constructed via this method
+ is untie()ed later. Otherwise the stream would be broken afterwards.
+ */
+ DebugStream (DebugStreamState& master,
+ std::ostream& fallback = std::cerr)
+ {
+ // start a new list of streams
+ current = new StreamWrap(fallback);
+ current->next = 0;
+
+ // check if we are above the default activation level
+ _active = activator<thislevel,alevel>::value;
+ _tied_streams = 0;
+
+ // tie to the provided stream
+ _tied = true;
+ tiedstate = &master;
+ tiedstate->_tied_streams++;
+ }
+
+ /*! \brief Destroy stream.
+
+ If other streams still tie() to this stream the destructor
+ will call std::terminate() because you can hardly recover
+ from this problem and the child streams would certainly break on the
+ next output.
+ */
+ ~DebugStream()
+ {
+ // untie
+ if (_tied)
+ tiedstate->_tied_streams--;
+ else {
+ // check if somebody still ties to us...
+ if (_tied_streams != 0)
+ {
+ std::cerr << "DebugStream destructor is called while other streams are still tied to it. Terminating!" << std::endl;
+ std::terminate();
+ }
+ }
+
+ // remove ostream-stack
+ while (current != 0) {
+ StreamWrap *s = current;
+ current = current->next;
+ delete s;
+ }
+ }
+
+ //! \brief Generic types are passed on to current output stream
+ template <class T>
+ DebugStream& operator<<(const T data) {
+ // remove the following code if stream wasn't compiled active
+ if (activator<thislevel, dlevel>::value) {
+ if (! _tied) {
+ if (_active)
+ current->out << data;
+ } else {
+ if (_active && tiedstate->_active)
+ tiedstate->current->out << data;
+ }
+ }
+
+ return *this;
+ }
+
+    /*! \brief explicit overload so that enums can be printed
+
+       Operators for built-in types follow special
+       rules (§11.2.3) so that enums won't fit into the generic
+       method above. With an existing operator<< for int, however,
+       the enum will be converted automatically.
+     */
+ DebugStream& operator<<(const int data) {
+ // remove the following code if stream wasn't compiled active
+ if (activator<thislevel, dlevel>::value) {
+ if (! _tied) {
+ if (_active)
+ current->out << data;
+ } else {
+ if (_active && tiedstate->_active)
+ tiedstate->current->out << data;
+ }
+ }
+
+ return *this;
+ }
+
+ //! \brief pass on manipulators to underlying output stream
+ DebugStream& operator<<(std::ostream& (*f)(std::ostream&)) {
+ if (activator<thislevel, dlevel>::value) {
+ if (! _tied) {
+ if (_active)
+ f(current->out);
+ } else {
+ if (_active && tiedstate->_active)
+ f(tiedstate->current->out);
+ }
+ }
+
+ return *this;
+ }
+
+ //! \brief pass on flush to underlying output stream
+ DebugStream& flush() {
+ if (activator<thislevel, dlevel>::value) {
+ if (! _tied) {
+ if (_active)
+ current->out.flush();
+ } else {
+ if (_active && tiedstate->_active)
+ tiedstate->current->out.flush();
+ }
+ }
+
+ return *this;
+ }
+
+ //! \brief set activation flag and store old value
+ void push(bool b) {
+ // are we at all active?
+ if (activator<thislevel,alevel>::value) {
+ _actstack.push(_active);
+ _active = b;
+ } else {
+ // stay off
+ _actstack.push(false);
+ }
+ }
+
+ /*! \brief restore previously set activation flag
+ * \throws DebugStreamError
+ */
+ void pop() {
+ if (_actstack.empty())
+ DUNE_THROW(DebugStreamError, "No previous activation setting!");
+
+ _active = _actstack.top();
+ _actstack.pop();
+ }
+
+ /*! \brief reports if this stream will produce output
+
+ a DebugStream that is deactivated because of its level will always
+ return false, otherwise the state of the internal activation is
+ returned
+ */
+ bool active() const {
+ return activator<thislevel, dlevel>::value && _active;
+ }
+
+ /*! \brief set output to a different stream.
+
+ Old stream data is stored
+ */
+ void attach(std::ostream& stream) {
+ if (_tied)
+ DUNE_THROW(DebugStreamError, "Cannot attach to a tied stream!");
+
+ StreamWrap* newcurr = new StreamWrap(stream);
+ newcurr->next = current;
+ current = newcurr;
+ }
+
+ /*! \brief detach current output stream and restore to previous stream
+ * \throws DebugStreamError
+ */
+ void detach() {
+ if (current->next == 0)
+ DUNE_THROW(DebugStreamError, "Cannot detach initial stream!");
+ if (_tied)
+ DUNE_THROW(DebugStreamError, "Cannot detach a tied stream!");
+
+ StreamWrap* old = current;
+ current = current->next;
+ delete old;
+ }
+
+    /*! \brief Tie this stream to another one.
+ * \throws DebugStreamError
+ */
+ void tie(DebugStreamState& to) {
+ if (to._tied)
+ DUNE_THROW(DebugStreamError, "Cannot tie to an already tied stream!");
+ if (_tied)
+ DUNE_THROW(DebugStreamError, "Stream already tied: untie first!");
+
+ _tied = true;
+ tiedstate = &to;
+
+ // tell master class
+ tiedstate->_tied_streams++;
+ }
+
+ /*! \brief Untie stream
+ * \throws DebugStreamError
+ */
+ void untie() {
+ if(! _tied)
+ DUNE_THROW(DebugStreamError, "Cannot untie, stream is not tied!");
+
+ tiedstate->_tied_streams--;
+ _tied = false;
+ tiedstate = 0;
+ }
+
+ private:
+ //! \brief pointer to data of stream we're tied to
+ DebugStreamState* tiedstate;
+
+ /*! \brief Activation state history.
+
+ store old activation settings so that the outside code doesn't
+ need to remember */
+ std::stack<bool> _actstack;
+ };
+
+  /** \} */
+}
+
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_DENSEMATRIX_HH
+#define DUNE_DENSEMATRIX_HH
+
+#include <cmath>
+#include <cstddef>
+#include <iostream>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include <dune/common/boundschecking.hh>
+#include <dune/common/classname.hh>
+#include <dune/common/deprecated.hh>
+#include <dune/common/exceptions.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/math.hh>
+#include <dune/common/precision.hh>
+#include <dune/common/simd/simd.hh>
+#include <dune/common/typetraits.hh>
+#include <dune/common/scalarvectorview.hh>
+
+namespace Dune
+{
+
+ template<typename M> class DenseMatrix;
+
+ template<typename M>
+ struct FieldTraits< DenseMatrix<M> >
+ {
+ typedef const typename FieldTraits< typename DenseMatVecTraits<M>::value_type >::field_type field_type;
+ typedef const typename FieldTraits< typename DenseMatVecTraits<M>::value_type >::real_type real_type;
+ };
+
+ /**
+ work around a problem of FieldMatrix/FieldVector,
+ there is no unique way to obtain the size of a class
+
+ \deprecated VectorSize is deprecated; please call the 'size()' method directly instead.
+ This will be removed after Dune 2.8.
+ */
+ template<class K, int N, int M> class FieldMatrix;
+ template<class K, int N> class FieldVector;
+ namespace {
+ template<class V>
+ struct [[deprecated("VectorSize is deprecated; please call the 'size()' method directly instead")]] VectorSize
+ {
+ static typename V::size_type size(const V & v) { return v.size(); }
+ };
+
+ DUNE_NO_DEPRECATED_BEGIN
+ template<class K, int N>
+ struct [[deprecated("VectorSize is deprecated; please call the 'size()' method directly instead")]] VectorSize< const FieldVector<K,N> >
+ {
+ typedef FieldVector<K,N> V;
+ static typename V::size_type size([[maybe_unused]] const V & v)
+ {
+ return N;
+ }
+ };
+ DUNE_NO_DEPRECATED_END
+ }
+
+ /**
+ @addtogroup DenseMatVec
+ @{
+ */
+
+ /*! \file
+
+ \brief Implements a matrix constructed from a given type
+ representing a field and a compile-time given number of rows and columns.
+ */
+
+
+
+ /**
+ \brief you have to specialize this structure for any type that should be assignable to a DenseMatrix
+ \tparam DenseMatrix Some type implementing the dense matrix interface
+ \tparam RHS Right hand side type
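+
+     A minimal sketch of such a specialization (the type MyWrapper and its
+     entry() method are hypothetical):
+     \code
+     // in namespace Dune
+     template< class DenseMatrix >
+     struct DenseMatrixAssigner< DenseMatrix, MyWrapper >
+     {
+       static void apply ( DenseMatrix &m, const MyWrapper &w )
+       {
+         for( typename DenseMatrix::size_type i = 0; i < m.N(); ++i )
+           for( typename DenseMatrix::size_type j = 0; j < m.M(); ++j )
+             m[ i ][ j ] = w.entry( i, j );
+       }
+     };
+     \endcode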
+ */
+ template< class DenseMatrix, class RHS >
+ struct DenseMatrixAssigner;
+
+#ifndef DOXYGEN
+ namespace Impl
+ {
+
+ template< class DenseMatrix, class RHS, class = void >
+ class DenseMatrixAssigner
+ {};
+
+ template< class DenseMatrix, class RHS >
+ class DenseMatrixAssigner< DenseMatrix, RHS, std::enable_if_t< Dune::IsNumber< RHS >::value > >
+ {
+ public:
+ static void apply ( DenseMatrix &denseMatrix, const RHS &rhs )
+ {
+ typedef typename DenseMatrix::field_type field_type;
+ std::fill( denseMatrix.begin(), denseMatrix.end(), static_cast< field_type >( rhs ) );
+ }
+ };
+
+ template< class DenseMatrix, class RHS >
+ class DenseMatrixAssigner< DenseMatrix, RHS, std::enable_if_t< !std::is_same< typename RHS::const_iterator, void >::value
+ && std::is_convertible< typename RHS::const_iterator::value_type, typename DenseMatrix::iterator::value_type >::value > >
+ {
+ public:
+ static void apply ( DenseMatrix &denseMatrix, const RHS &rhs )
+ {
+ DUNE_ASSERT_BOUNDS(rhs.N() == denseMatrix.N());
+ DUNE_ASSERT_BOUNDS(rhs.M() == denseMatrix.M());
+ typename DenseMatrix::iterator tIt = std::begin(denseMatrix);
+ typename RHS::const_iterator sIt = std::begin(rhs);
+ for(; sIt != std::end(rhs); ++tIt, ++sIt)
+ std::copy(std::begin(*sIt), std::end(*sIt), std::begin(*tIt));
+ }
+ };
+
+ } // namespace Impl
+
+
+
+ template< class DenseMatrix, class RHS >
+ struct DenseMatrixAssigner
+ : public Impl::DenseMatrixAssigner< DenseMatrix, RHS >
+ {};
+
+
+ namespace Impl
+ {
+
+ template< class DenseMatrix, class RHS >
+ std::true_type hasDenseMatrixAssigner ( DenseMatrix &, const RHS &, decltype( Dune::DenseMatrixAssigner< DenseMatrix, RHS >::apply( std::declval< DenseMatrix & >(), std::declval< const RHS & >() ) ) * = nullptr );
+
+ std::false_type hasDenseMatrixAssigner ( ... );
+
+ } // namespace Impl
+
+ template< class DenseMatrix, class RHS >
+ struct HasDenseMatrixAssigner
+ : public decltype( Impl::hasDenseMatrixAssigner( std::declval< DenseMatrix & >(), std::declval< const RHS & >() ) )
+ {};
+
+#endif // #ifndef DOXYGEN
+
+
+
+ /** @brief Error thrown if operations of a FieldMatrix fail. */
+ class FMatrixError : public MathError {};
+
+ /**
+ @brief A dense n x m matrix.
+
+ Matrices represent linear maps from a vector space V to a vector space W.
+ This class represents such a linear map by storing a two-dimensional
+ %array of numbers of a given field type K. The number of rows and
+ columns is given at compile time.
+
+ \tparam MAT type of the matrix implementation
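+
+     A short usage sketch with the derived class FieldMatrix (from
+     dune/common/fmatrix.hh); the concrete numbers are only an example:
+     \code
+     Dune::FieldMatrix<double,2,2> A;
+     A[0][0] = 1; A[0][1] = 2;
+     A[1][0] = 3; A[1][1] = 4;
+     Dune::FieldVector<double,2> x(1.0), y;
+     A.mv(x, y);                          // y = A x
+     auto nrm = A.frobenius_norm();       // Frobenius norm of A
+     \endcode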
+ */
+ template<typename MAT>
+ class DenseMatrix
+ {
+ typedef DenseMatVecTraits<MAT> Traits;
+
+ // Curiously recurring template pattern
+ constexpr MAT & asImp() { return static_cast<MAT&>(*this); }
+ constexpr const MAT & asImp() const { return static_cast<const MAT&>(*this); }
+
+ template <class>
+ friend class DenseMatrix;
+
+ public:
+ //===== type definitions and constants
+
+ //! type of derived matrix class
+ typedef typename Traits::derived_type derived_type;
+
+ //! export the type representing the field
+ typedef typename Traits::value_type value_type;
+
+ //! export the type representing the field
+ typedef typename Traits::value_type field_type;
+
+ //! export the type representing the components
+ typedef typename Traits::value_type block_type;
+
+ //! The type used for the index access and size operation
+ typedef typename Traits::size_type size_type;
+
+ //! The type used to represent a row (must fulfill the Dune::DenseVector interface)
+ typedef typename Traits::row_type row_type;
+
+ //! The type used to represent a reference to a row (usually row_type &)
+ typedef typename Traits::row_reference row_reference;
+
+ //! The type used to represent a reference to a constant row (usually const row_type &)
+ typedef typename Traits::const_row_reference const_row_reference;
+
+ //! We are at the leaf of the block recursion
+ enum {
+ //! The number of block levels we contain. This is 1.
+ blocklevel = 1
+ };
+
+ private:
+ //! \brief if value_type is a simd vector, then this is a simd vector of
+ //! the same length that can be used for indices.
+ using simd_index_type = Simd::Rebind<std::size_t, value_type>;
+
+ public:
+ //===== access to components
+
+ //! random access
+ row_reference operator[] ( size_type i )
+ {
+ return asImp().mat_access(i);
+ }
+
+ const_row_reference operator[] ( size_type i ) const
+ {
+ return asImp().mat_access(i);
+ }
+
+ //! size method (number of rows)
+ size_type size() const
+ {
+ return rows();
+ }
+
+ //===== iterator interface to rows of the matrix
+ //! Iterator class for sequential access
+ typedef DenseIterator<DenseMatrix,row_type,row_reference> Iterator;
+ //! typedef for stl compliant access
+ typedef Iterator iterator;
+ //! rename the iterators for easier access
+ typedef Iterator RowIterator;
+ //! rename the iterators for easier access
+ typedef typename std::remove_reference<row_reference>::type::Iterator ColIterator;
+
+ //! begin iterator
+ Iterator begin ()
+ {
+ return Iterator(*this,0);
+ }
+
+ //! end iterator
+ Iterator end ()
+ {
+ return Iterator(*this,rows());
+ }
+
+ //! @returns an iterator that is positioned before
+ //! the end iterator of the vector, i.e. at the last entry.
+ Iterator beforeEnd ()
+ {
+ return Iterator(*this,rows()-1);
+ }
+
+ //! @returns an iterator that is positioned before
+ //! the first entry of the vector.
+ Iterator beforeBegin ()
+ {
+ return Iterator(*this,-1);
+ }
+
+ //! Iterator class for sequential access
+ typedef DenseIterator<const DenseMatrix,const row_type,const_row_reference> ConstIterator;
+ //! typedef for stl compliant access
+ typedef ConstIterator const_iterator;
+ //! rename the iterators for easier access
+ typedef ConstIterator ConstRowIterator;
+ //! rename the iterators for easier access
+ typedef typename std::remove_reference<const_row_reference>::type::ConstIterator ConstColIterator;
+
+ //! begin iterator
+ ConstIterator begin () const
+ {
+ return ConstIterator(*this,0);
+ }
+
+ //! end iterator
+ ConstIterator end () const
+ {
+ return ConstIterator(*this,rows());
+ }
+
+ //! @returns an iterator that is positioned before
+ //! the end iterator of the vector. i.e. at the last element
+ ConstIterator beforeEnd () const
+ {
+ return ConstIterator(*this,rows()-1);
+ }
+
+ //! @returns an iterator that is positioned before
+ //! the first entry of the vector.
+ ConstIterator beforeBegin () const
+ {
+ return ConstIterator(*this,-1);
+ }
+
+ //===== assignment
+
+ template< class RHS, class = std::enable_if_t< HasDenseMatrixAssigner< MAT, RHS >::value > >
+ derived_type &operator= ( const RHS &rhs )
+ {
+ DenseMatrixAssigner< MAT, RHS >::apply( asImp(), rhs );
+ return asImp();
+ }
+
+ //===== vector space arithmetic
+
+ //! vector space addition
+ template <class Other>
+ derived_type &operator+= (const DenseMatrix<Other>& x)
+ {
+ DUNE_ASSERT_BOUNDS(rows() == x.rows());
+ for (size_type i=0; i<rows(); i++)
+ (*this)[i] += x[i];
+ return asImp();
+ }
+
+ //! Matrix negation
+ derived_type operator- () const
+ {
+ MAT result;
+ using idx_type = typename decltype(result)::size_type;
+
+ for (idx_type i = 0; i < rows(); ++i)
+ for (idx_type j = 0; j < cols(); ++j)
+ result[i][j] = - asImp()[i][j];
+
+ return result;
+ }
+
+ //! vector space subtraction
+ template <class Other>
+ derived_type &operator-= (const DenseMatrix<Other>& x)
+ {
+ DUNE_ASSERT_BOUNDS(rows() == x.rows());
+ for (size_type i=0; i<rows(); i++)
+ (*this)[i] -= x[i];
+ return asImp();
+ }
+
+ //! vector space multiplication with scalar
+ derived_type &operator*= (const field_type& k)
+ {
+ for (size_type i=0; i<rows(); i++)
+ (*this)[i] *= k;
+ return asImp();
+ }
+
+ //! vector space division by scalar
+ derived_type &operator/= (const field_type& k)
+ {
+ for (size_type i=0; i<rows(); i++)
+ (*this)[i] /= k;
+ return asImp();
+ }
+
+ //! vector space axpy operation (*this += a x)
+ template <class Other>
+ derived_type &axpy (const field_type &a, const DenseMatrix<Other> &x )
+ {
+ DUNE_ASSERT_BOUNDS(rows() == x.rows());
+ for( size_type i = 0; i < rows(); ++i )
+ (*this)[ i ].axpy( a, x[ i ] );
+ return asImp();
+ }
+
+ //! Binary matrix comparison
+ template <class Other>
+ bool operator== (const DenseMatrix<Other>& x) const
+ {
+ DUNE_ASSERT_BOUNDS(rows() == x.rows());
+ for (size_type i=0; i<rows(); i++)
+ if ((*this)[i]!=x[i])
+ return false;
+ return true;
+ }
+    //! Binary matrix comparison for inequality
+ template <class Other>
+ bool operator!= (const DenseMatrix<Other>& x) const
+ {
+ return !operator==(x);
+ }
+
+
+ //===== linear maps
+
+ //! y = A x
+ template<class X, class Y>
+ void mv (const X& x, Y& y) const
+ {
+ auto&& xx = Impl::asVector(x);
+ auto&& yy = Impl::asVector(y);
+ DUNE_ASSERT_BOUNDS((void*)(&x) != (void*)(&y));
+ DUNE_ASSERT_BOUNDS(xx.N() == M());
+ DUNE_ASSERT_BOUNDS(yy.N() == N());
+
+ using y_field_type = typename FieldTraits<Y>::field_type;
+ for (size_type i=0; i<rows(); ++i)
+ {
+ yy[i] = y_field_type(0);
+ for (size_type j=0; j<cols(); j++)
+ yy[i] += (*this)[i][j] * xx[j];
+ }
+ }
+
+ //! y = A^T x
+ template< class X, class Y >
+ void mtv ( const X &x, Y &y ) const
+ {
+ auto&& xx = Impl::asVector(x);
+ auto&& yy = Impl::asVector(y);
+ DUNE_ASSERT_BOUNDS((void*)(&x) != (void*)(&y));
+ DUNE_ASSERT_BOUNDS(xx.N() == N());
+ DUNE_ASSERT_BOUNDS(yy.N() == M());
+
+ using y_field_type = typename FieldTraits<Y>::field_type;
+ for(size_type i = 0; i < cols(); ++i)
+ {
+ yy[i] = y_field_type(0);
+ for(size_type j = 0; j < rows(); ++j)
+ yy[i] += (*this)[j][i] * xx[j];
+ }
+ }
+
+ //! y += A x
+ template<class X, class Y>
+ void umv (const X& x, Y& y) const
+ {
+ auto&& xx = Impl::asVector(x);
+ auto&& yy = Impl::asVector(y);
+ DUNE_ASSERT_BOUNDS(xx.N() == M());
+ DUNE_ASSERT_BOUNDS(yy.N() == N());
+ for (size_type i=0; i<rows(); ++i)
+ for (size_type j=0; j<cols(); j++)
+ yy[i] += (*this)[i][j] * xx[j];
+ }
+
+ //! y += A^T x
+ template<class X, class Y>
+ void umtv (const X& x, Y& y) const
+ {
+ auto&& xx = Impl::asVector(x);
+ auto&& yy = Impl::asVector(y);
+ DUNE_ASSERT_BOUNDS(xx.N() == N());
+ DUNE_ASSERT_BOUNDS(yy.N() == M());
+ for(size_type i = 0; i<rows(); ++i)
+ for (size_type j=0; j<cols(); j++)
+ yy[j] += (*this)[i][j]*xx[i];
+ }
+
+ //! y += A^H x
+ template<class X, class Y>
+ void umhv (const X& x, Y& y) const
+ {
+ auto&& xx = Impl::asVector(x);
+ auto&& yy = Impl::asVector(y);
+ DUNE_ASSERT_BOUNDS(xx.N() == N());
+ DUNE_ASSERT_BOUNDS(yy.N() == M());
+ for (size_type i=0; i<rows(); i++)
+ for (size_type j=0; j<cols(); j++)
+ yy[j] += conjugateComplex((*this)[i][j])*xx[i];
+ }
+
+ //! y -= A x
+ template<class X, class Y>
+ void mmv (const X& x, Y& y) const
+ {
+ auto&& xx = Impl::asVector(x);
+ auto&& yy = Impl::asVector(y);
+ DUNE_ASSERT_BOUNDS(xx.N() == M());
+ DUNE_ASSERT_BOUNDS(yy.N() == N());
+ for (size_type i=0; i<rows(); i++)
+ for (size_type j=0; j<cols(); j++)
+ yy[i] -= (*this)[i][j] * xx[j];
+ }
+
+ //! y -= A^T x
+ template<class X, class Y>
+ void mmtv (const X& x, Y& y) const
+ {
+ auto&& xx = Impl::asVector(x);
+ auto&& yy = Impl::asVector(y);
+ DUNE_ASSERT_BOUNDS(xx.N() == N());
+ DUNE_ASSERT_BOUNDS(yy.N() == M());
+ for (size_type i=0; i<rows(); i++)
+ for (size_type j=0; j<cols(); j++)
+ yy[j] -= (*this)[i][j]*xx[i];
+ }
+
+ //! y -= A^H x
+ template<class X, class Y>
+ void mmhv (const X& x, Y& y) const
+ {
+ auto&& xx = Impl::asVector(x);
+ auto&& yy = Impl::asVector(y);
+ DUNE_ASSERT_BOUNDS(xx.N() == N());
+ DUNE_ASSERT_BOUNDS(yy.N() == M());
+ for (size_type i=0; i<rows(); i++)
+ for (size_type j=0; j<cols(); j++)
+ yy[j] -= conjugateComplex((*this)[i][j])*xx[i];
+ }
+
+ //! y += alpha A x
+ template<class X, class Y>
+ void usmv (const typename FieldTraits<Y>::field_type & alpha,
+ const X& x, Y& y) const
+ {
+ auto&& xx = Impl::asVector(x);
+ auto&& yy = Impl::asVector(y);
+ DUNE_ASSERT_BOUNDS(xx.N() == M());
+ DUNE_ASSERT_BOUNDS(yy.N() == N());
+ for (size_type i=0; i<rows(); i++)
+ for (size_type j=0; j<cols(); j++)
+ yy[i] += alpha * (*this)[i][j] * xx[j];
+ }
+
+ //! y += alpha A^T x
+ template<class X, class Y>
+ void usmtv (const typename FieldTraits<Y>::field_type & alpha,
+ const X& x, Y& y) const
+ {
+ auto&& xx = Impl::asVector(x);
+ auto&& yy = Impl::asVector(y);
+ DUNE_ASSERT_BOUNDS(xx.N() == N());
+ DUNE_ASSERT_BOUNDS(yy.N() == M());
+ for (size_type i=0; i<rows(); i++)
+ for (size_type j=0; j<cols(); j++)
+ yy[j] += alpha*(*this)[i][j]*xx[i];
+ }
+
+ //! y += alpha A^H x
+ template<class X, class Y>
+ void usmhv (const typename FieldTraits<Y>::field_type & alpha,
+ const X& x, Y& y) const
+ {
+ auto&& xx = Impl::asVector(x);
+ auto&& yy = Impl::asVector(y);
+ DUNE_ASSERT_BOUNDS(xx.N() == N());
+ DUNE_ASSERT_BOUNDS(yy.N() == M());
+ for (size_type i=0; i<rows(); i++)
+ for (size_type j=0; j<cols(); j++)
+ yy[j] +=
+ alpha*conjugateComplex((*this)[i][j])*xx[i];
+ }
+
+ //===== norms
+
+ //! frobenius norm: sqrt(sum over squared values of entries)
+ typename FieldTraits<value_type>::real_type frobenius_norm () const
+ {
+ typename FieldTraits<value_type>::real_type sum=(0.0);
+ for (size_type i=0; i<rows(); ++i) sum += (*this)[i].two_norm2();
+ return fvmeta::sqrt(sum);
+ }
+
+    //! square of the Frobenius norm, needed for block recursion
+ typename FieldTraits<value_type>::real_type frobenius_norm2 () const
+ {
+ typename FieldTraits<value_type>::real_type sum=(0.0);
+ for (size_type i=0; i<rows(); ++i) sum += (*this)[i].two_norm2();
+ return sum;
+ }
+
+ //! infinity norm (row sum norm, how to generalize for blocks?)
+ template <typename vt = value_type,
+ typename std::enable_if<!HasNaN<vt>::value, int>::type = 0>
+ typename FieldTraits<vt>::real_type infinity_norm() const {
+ using real_type = typename FieldTraits<vt>::real_type;
+ using std::max;
+
+ real_type norm = 0;
+ for (auto const &x : *this) {
+ real_type const a = x.one_norm();
+ norm = max(a, norm);
+ }
+ return norm;
+ }
+
+ //! simplified infinity norm (uses Manhattan norm for complex values)
+ template <typename vt = value_type,
+ typename std::enable_if<!HasNaN<vt>::value, int>::type = 0>
+ typename FieldTraits<vt>::real_type infinity_norm_real() const {
+ using real_type = typename FieldTraits<vt>::real_type;
+ using std::max;
+
+ real_type norm = 0;
+ for (auto const &x : *this) {
+ real_type const a = x.one_norm_real();
+ norm = max(a, norm);
+ }
+ return norm;
+ }
+
+ //! infinity norm (row sum norm, how to generalize for blocks?)
+ template <typename vt = value_type,
+ typename std::enable_if<HasNaN<vt>::value, int>::type = 0>
+ typename FieldTraits<vt>::real_type infinity_norm() const {
+ using real_type = typename FieldTraits<vt>::real_type;
+ using std::max;
+
+ real_type norm = 0;
+ real_type isNaN = 1;
+ for (auto const &x : *this) {
+ real_type const a = x.one_norm();
+ norm = max(a, norm);
+ isNaN += a;
+ }
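+      // isNaN accumulated all row sums: if any entry was NaN, isNaN is NaN and
+      // isNaN/isNaN propagates the NaN into the result; otherwise the factor is 1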
+ return norm * (isNaN / isNaN);
+ }
+
+ //! simplified infinity norm (uses Manhattan norm for complex values)
+ template <typename vt = value_type,
+ typename std::enable_if<HasNaN<vt>::value, int>::type = 0>
+ typename FieldTraits<vt>::real_type infinity_norm_real() const {
+ using real_type = typename FieldTraits<vt>::real_type;
+ using std::max;
+
+ real_type norm = 0;
+ real_type isNaN = 1;
+ for (auto const &x : *this) {
+ real_type const a = x.one_norm_real();
+ norm = max(a, norm);
+ isNaN += a;
+ }
+ return norm * (isNaN / isNaN);
+ }
+
+ //===== solve
+
+ /** \brief Solve system A x = b
+ *
+ * \exception FMatrixError if the matrix is singular
+ */
+ template <class V1, class V2>
+ void solve (V1& x, const V2& b, bool doPivoting = true) const;
+
+ /** \brief Compute inverse
+ *
+ * \exception FMatrixError if the matrix is singular
+ */
+ void invert(bool doPivoting = true);
+
+ //! calculates the determinant of this matrix
+ field_type determinant (bool doPivoting = true) const;
+
+ //! Multiplies M from the left to this matrix
+ template<typename M2>
+ MAT& leftmultiply (const DenseMatrix<M2>& M)
+ {
+ DUNE_ASSERT_BOUNDS(M.rows() == M.cols());
+ DUNE_ASSERT_BOUNDS(M.rows() == rows());
+ AutonomousValue<MAT> C(asImp());
+
+ for (size_type i=0; i<rows(); i++)
+ for (size_type j=0; j<cols(); j++) {
+ (*this)[i][j] = 0;
+ for (size_type k=0; k<rows(); k++)
+ (*this)[i][j] += M[i][k]*C[k][j];
+ }
+
+ return asImp();
+ }
+
+ //! Multiplies M from the right to this matrix
+ template<typename M2>
+ MAT& rightmultiply (const DenseMatrix<M2>& M)
+ {
+ DUNE_ASSERT_BOUNDS(M.rows() == M.cols());
+ DUNE_ASSERT_BOUNDS(M.cols() == cols());
+ AutonomousValue<MAT> C(asImp());
+
+ for (size_type i=0; i<rows(); i++)
+ for (size_type j=0; j<cols(); j++) {
+ (*this)[i][j] = 0;
+ for (size_type k=0; k<cols(); k++)
+ (*this)[i][j] += C[i][k]*M[k][j];
+ }
+ return asImp();
+ }
+
+#if 0
+ //! Multiplies M from the left to this matrix, this matrix is not modified
+ template<int l>
+ DenseMatrix<K,l,cols> leftmultiplyany (const FieldMatrix<K,l,rows>& M) const
+ {
+ FieldMatrix<K,l,cols> C;
+
+ for (size_type i=0; i<l; i++) {
+ for (size_type j=0; j<cols(); j++) {
+ C[i][j] = 0;
+ for (size_type k=0; k<rows(); k++)
+ C[i][j] += M[i][k]*(*this)[k][j];
+ }
+ }
+ return C;
+ }
+
+ //! Multiplies M from the right to this matrix, this matrix is not modified
+ template<int l>
+ FieldMatrix<K,rows,l> rightmultiplyany (const FieldMatrix<K,cols,l>& M) const
+ {
+ FieldMatrix<K,rows,l> C;
+
+ for (size_type i=0; i<rows(); i++) {
+ for (size_type j=0; j<l; j++) {
+ C[i][j] = 0;
+ for (size_type k=0; k<cols(); k++)
+ C[i][j] += (*this)[i][k]*M[k][j];
+ }
+ }
+ return C;
+ }
+#endif
+
+ //===== sizes
+
+ //! number of rows
+ constexpr size_type N () const
+ {
+ return rows();
+ }
+
+ //! number of columns
+ constexpr size_type M () const
+ {
+ return cols();
+ }
+
+ //! number of rows
+ constexpr size_type rows() const
+ {
+ return asImp().mat_rows();
+ }
+
+ //! number of columns
+ constexpr size_type cols() const
+ {
+ return asImp().mat_cols();
+ }
+
+ //===== query
+
+ //! return true when (i,j) is in pattern
+ bool exists ([[maybe_unused]] size_type i, [[maybe_unused]] size_type j) const
+ {
+ DUNE_ASSERT_BOUNDS(i >= 0 && i < rows());
+ DUNE_ASSERT_BOUNDS(j >= 0 && j < cols());
+ return true;
+ }
+
+ protected:
+
+#ifndef DOXYGEN
+ struct ElimPivot
+ {
+ ElimPivot(std::vector<simd_index_type> & pivot);
+
+ void swap(std::size_t i, simd_index_type j);
+
+ template<typename T>
+ void operator()(const T&, int, int)
+ {}
+
+ std::vector<simd_index_type> & pivot_;
+ };
+
+ template<typename V>
+ struct Elim
+ {
+ Elim(V& rhs);
+
+ void swap(std::size_t i, simd_index_type j);
+
+ void operator()(const typename V::field_type& factor, int k, int i);
+
+ V* rhs_;
+ };
+
+ struct ElimDet
+ {
+ ElimDet(field_type& sign) : sign_(sign)
+ { sign_ = 1; }
+
+ void swap(std::size_t i, simd_index_type j)
+ {
+ sign_ *=
+ Simd::cond(simd_index_type(i) == j, field_type(1), field_type(-1));
+ }
+
+ void operator()(const field_type&, int, int)
+ {}
+
+ field_type& sign_;
+ };
+#endif // DOXYGEN
+
+ //! do an LU-Decomposition on matrix A
+ /**
+ * \param A The matrix to decompose, and to store the
+ * result in.
+ * \param func Functor used for swapping lanes and to conduct
+ * the elimination. Depending on the functor, \c
+ * luDecomposition() can be used for solving, for
+ * inverting, or to compute the determinant.
+ * \param nonsingularLanes SimdMask of lanes that are nonsingular.
+ * \param throwEarly Whether to throw an \c FMatrixError immediately
+ * as soon as one lane is discovered to be
+ * singular. If \c false, do not throw, instead
+ * continue until finished or all lanes are
+ * singular, and exit via return in both cases.
+ * \param doPivoting Enable pivoting.
+ *
+ * There are two modes of operation:
+ * <ul>
+ * <li>Terminate as soon as one lane is discovered to be singular. Early
+ * termination is done by throwing an \c FMatrixError. On entry, \c
+ * Simd::allTrue(nonsingularLanes) and \c throwEarly==true should hold.
+ * After early termination, the contents of \c A should be considered
+ * bogus, and \c nonsingularLanes has the lane(s) that triggered the
+ * early termination unset. There may be more singular lanes than the
+     *     one reported in \c nonsingularLanes, which just haven't been
+ * discovered yet; so the value of \c nonsingularLanes is mostly
+ * useful for diagnostics.</li>
+ * <li>Terminate only when all lanes are discovered to be singular. Use
+ * this when you want to apply special postprocessing in singular
+ * lines (e.g. setting the determinant of singular lanes to 0 in \c
+ * determinant()). On entry, \c nonsingularLanes may have any value
+ * and \c throwEarly==false should hold. The function will not throw
+ * an exception if some lanes are discovered to be singular, instead
+ * it will continue running until all lanes are singular or until
+ * finished, and terminate only via normal return. On exit, \c
+ * nonsingularLanes contains the map of lanes that are valid in \c
+ * A.</li>
+ * </ul>
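+     *
+     * A sketch of both modes, mirroring the calls in invert() and
+     * determinant() below (A, pivot, det and doPivoting as defined there):
+     * \code
+     * Simd::Mask<typename FieldTraits<value_type>::real_type> ok(true);
+     * // mode 1: throw FMatrixError as soon as any lane turns out singular
+     * luDecomposition(A, ElimPivot(pivot), ok, true, doPivoting);
+     * // mode 2: never throw; afterwards 'ok' marks the lanes that are valid in A
+     * luDecomposition(A, ElimDet(det), ok, false, doPivoting);
+     * \endcode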
+ */
+ template<class Func, class Mask>
+ static void luDecomposition(DenseMatrix<MAT>& A, Func func,
+ Mask &nonsingularLanes, bool throwEarly, bool doPivoting);
+ };
+
+#ifndef DOXYGEN
+ template<typename MAT>
+ DenseMatrix<MAT>::ElimPivot::ElimPivot(std::vector<simd_index_type> & pivot)
+ : pivot_(pivot)
+ {
+ typedef typename std::vector<size_type>::size_type size_type;
+ for(size_type i=0; i < pivot_.size(); ++i) pivot_[i]=i;
+ }
+
+ template<typename MAT>
+ void DenseMatrix<MAT>::ElimPivot::swap(std::size_t i, simd_index_type j)
+ {
+ pivot_[i] =
+ Simd::cond(Simd::Scalar<simd_index_type>(i) == j, pivot_[i], j);
+ }
+
+ template<typename MAT>
+ template<typename V>
+ DenseMatrix<MAT>::Elim<V>::Elim(V& rhs)
+ : rhs_(&rhs)
+ {}
+
+ template<typename MAT>
+ template<typename V>
+ void DenseMatrix<MAT>::Elim<V>::swap(std::size_t i, simd_index_type j)
+ {
+ using std::swap;
+
+ // see the comment in luDecomposition()
+ for(std::size_t l = 0; l < Simd::lanes(j); ++l)
+ swap(Simd::lane(l, (*rhs_)[ i ]),
+ Simd::lane(l, (*rhs_)[Simd::lane(l, j)]));
+ }
+
+ template<typename MAT>
+ template<typename V>
+ void DenseMatrix<MAT>::
+ Elim<V>::operator()(const typename V::field_type& factor, int k, int i)
+ {
+ (*rhs_)[k] -= factor*(*rhs_)[i];
+ }
+
+ template<typename MAT>
+ template<typename Func, class Mask>
+ inline void DenseMatrix<MAT>::
+ luDecomposition(DenseMatrix<MAT>& A, Func func, Mask &nonsingularLanes,
+ bool throwEarly, bool doPivoting)
+ {
+ using std::max;
+ using std::swap;
+
+ typedef typename FieldTraits<value_type>::real_type real_type;
+
+ // LU decomposition of A in A
+ for (size_type i=0; i<A.rows(); i++) // loop over all rows
+ {
+ real_type pivmax = fvmeta::absreal(A[i][i]);
+
+ if (doPivoting)
+ {
+ // compute maximum of column
+ simd_index_type imax=i;
+ for (size_type k=i+1; k<A.rows(); k++)
+ {
+ auto abs = fvmeta::absreal(A[k][i]);
+ auto mask = abs > pivmax;
+ pivmax = Simd::cond(mask, abs, pivmax);
+ imax = Simd::cond(mask, simd_index_type(k), imax);
+ }
+ // swap rows
+ for (size_type j=0; j<A.rows(); j++)
+ {
+ // This is a swap operation where the second operand is scattered,
+ // and on top of that is also extracted from deep within a
+ // moderately complicated data structure (a DenseMatrix), where we
+ // can't assume much on the memory layout. On intel processors,
+ // the only instruction that might help us here is vgather, but it
+ // is unclear whether that is even faster than a software
+ // implementation, and we would also need vscatter which does not
+ // exist. So break vectorization here and do it manually.
+ for(std::size_t l = 0; l < Simd::lanes(A[i][j]); ++l)
+ swap(Simd::lane(l, A[i][j]),
+ Simd::lane(l, A[Simd::lane(l, imax)][j]));
+ }
+ func.swap(i, imax); // swap the pivot or rhs
+ }
+
+ // singular ?
+ nonsingularLanes = nonsingularLanes && (pivmax != real_type(0));
+ if (throwEarly) {
+ if(!Simd::allTrue(nonsingularLanes))
+ DUNE_THROW(FMatrixError, "matrix is singular");
+ }
+ else { // !throwEarly
+ if(!Simd::anyTrue(nonsingularLanes))
+ return;
+ }
+
+ // eliminate
+ for (size_type k=i+1; k<A.rows(); k++)
+ {
+ // in the simd case, A[i][i] may be close to zero in some lanes. Pray
+ // that the result is no worse than a quiet NaN.
+ field_type factor = A[k][i]/A[i][i];
+ A[k][i] = factor;
+ for (size_type j=i+1; j<A.rows(); j++)
+ A[k][j] -= factor*A[i][j];
+ func(factor, k, i);
+ }
+ }
+ }
+
+ template<typename MAT>
+ template <class V1, class V2>
+ inline void DenseMatrix<MAT>::solve(V1& x, const V2& b, bool doPivoting) const
+ {
+ using real_type = typename FieldTraits<value_type>::real_type;
+ // never mind those ifs, because they get optimized away
+ if (rows()!=cols())
+ DUNE_THROW(FMatrixError, "Can't solve for a " << rows() << "x" << cols() << " matrix!");
+
+ if (rows()==1) {
+
+#ifdef DUNE_FMatrix_WITH_CHECKING
+ if (Simd::anyTrue(fvmeta::absreal((*this)[0][0])
+ < FMatrixPrecision<>::absolute_limit()))
+ DUNE_THROW(FMatrixError,"matrix is singular");
+#endif
+ x[0] = b[0]/(*this)[0][0];
+
+ }
+ else if (rows()==2) {
+
+ field_type detinv = (*this)[0][0]*(*this)[1][1]-(*this)[0][1]*(*this)[1][0];
+#ifdef DUNE_FMatrix_WITH_CHECKING
+ if (Simd::anyTrue(fvmeta::absreal(detinv)
+ < FMatrixPrecision<>::absolute_limit()))
+ DUNE_THROW(FMatrixError,"matrix is singular");
+#endif
+ detinv = real_type(1.0)/detinv;
+
+ x[0] = detinv*((*this)[1][1]*b[0]-(*this)[0][1]*b[1]);
+ x[1] = detinv*((*this)[0][0]*b[1]-(*this)[1][0]*b[0]);
+
+ }
+ else if (rows()==3) {
+
+ field_type d = determinant(doPivoting);
+#ifdef DUNE_FMatrix_WITH_CHECKING
+ if (Simd::anyTrue(fvmeta::absreal(d)
+ < FMatrixPrecision<>::absolute_limit()))
+ DUNE_THROW(FMatrixError,"matrix is singular");
+#endif
+
+ x[0] = (b[0]*(*this)[1][1]*(*this)[2][2] - b[0]*(*this)[2][1]*(*this)[1][2]
+ - b[1] *(*this)[0][1]*(*this)[2][2] + b[1]*(*this)[2][1]*(*this)[0][2]
+ + b[2] *(*this)[0][1]*(*this)[1][2] - b[2]*(*this)[1][1]*(*this)[0][2]) / d;
+
+ x[1] = ((*this)[0][0]*b[1]*(*this)[2][2] - (*this)[0][0]*b[2]*(*this)[1][2]
+ - (*this)[1][0] *b[0]*(*this)[2][2] + (*this)[1][0]*b[2]*(*this)[0][2]
+ + (*this)[2][0] *b[0]*(*this)[1][2] - (*this)[2][0]*b[1]*(*this)[0][2]) / d;
+
+ x[2] = ((*this)[0][0]*(*this)[1][1]*b[2] - (*this)[0][0]*(*this)[2][1]*b[1]
+ - (*this)[1][0] *(*this)[0][1]*b[2] + (*this)[1][0]*(*this)[2][1]*b[0]
+ + (*this)[2][0] *(*this)[0][1]*b[1] - (*this)[2][0]*(*this)[1][1]*b[0]) / d;
+
+ }
+ else {
+
+ V1& rhs = x; // use x to store rhs
+ rhs = b; // copy data
+ Elim<V1> elim(rhs);
+ AutonomousValue<MAT> A(asImp());
+ Simd::Mask<typename FieldTraits<value_type>::real_type>
+ nonsingularLanes(true);
+
+ AutonomousValue<MAT>::luDecomposition(A, elim, nonsingularLanes, true, doPivoting);
+
+ // backsolve
+ for(int i=rows()-1; i>=0; i--) {
+ for (size_type j=i+1; j<rows(); j++)
+ rhs[i] -= A[i][j]*x[j];
+ x[i] = rhs[i]/A[i][i];
+ }
+ }
+ }
+
+ template<typename MAT>
+ inline void DenseMatrix<MAT>::invert(bool doPivoting)
+ {
+ using real_type = typename FieldTraits<MAT>::real_type;
+ using std::swap;
+
+ // never mind those ifs, because they get optimized away
+ if (rows()!=cols())
+ DUNE_THROW(FMatrixError, "Can't invert a " << rows() << "x" << cols() << " matrix!");
+
+ if (rows()==1) {
+
+#ifdef DUNE_FMatrix_WITH_CHECKING
+ if (Simd::anyTrue(fvmeta::absreal((*this)[0][0])
+ < FMatrixPrecision<>::absolute_limit()))
+ DUNE_THROW(FMatrixError,"matrix is singular");
+#endif
+ (*this)[0][0] = real_type( 1 ) / (*this)[0][0];
+
+ }
+ else if (rows()==2) {
+
+ field_type detinv = (*this)[0][0]*(*this)[1][1]-(*this)[0][1]*(*this)[1][0];
+#ifdef DUNE_FMatrix_WITH_CHECKING
+ if (Simd::anyTrue(fvmeta::absreal(detinv)
+ < FMatrixPrecision<>::absolute_limit()))
+ DUNE_THROW(FMatrixError,"matrix is singular");
+#endif
+ detinv = real_type( 1 ) / detinv;
+
+ field_type temp=(*this)[0][0];
+ (*this)[0][0] = (*this)[1][1]*detinv;
+ (*this)[0][1] = -(*this)[0][1]*detinv;
+ (*this)[1][0] = -(*this)[1][0]*detinv;
+ (*this)[1][1] = temp*detinv;
+
+ }
+ else if (rows()==3)
+ {
+ using K = field_type;
+ // code generated by maple
+ K t4 = (*this)[0][0] * (*this)[1][1];
+ K t6 = (*this)[0][0] * (*this)[1][2];
+ K t8 = (*this)[0][1] * (*this)[1][0];
+ K t10 = (*this)[0][2] * (*this)[1][0];
+ K t12 = (*this)[0][1] * (*this)[2][0];
+ K t14 = (*this)[0][2] * (*this)[2][0];
+
+ K det = (t4*(*this)[2][2]-t6*(*this)[2][1]-t8*(*this)[2][2]+
+ t10*(*this)[2][1]+t12*(*this)[1][2]-t14*(*this)[1][1]);
+ K t17 = K(1.0)/det;
+
+ K matrix01 = (*this)[0][1];
+ K matrix00 = (*this)[0][0];
+ K matrix10 = (*this)[1][0];
+ K matrix11 = (*this)[1][1];
+
+ (*this)[0][0] = ((*this)[1][1] * (*this)[2][2] - (*this)[1][2] * (*this)[2][1])*t17;
+ (*this)[0][1] = -((*this)[0][1] * (*this)[2][2] - (*this)[0][2] * (*this)[2][1])*t17;
+ (*this)[0][2] = (matrix01 * (*this)[1][2] - (*this)[0][2] * (*this)[1][1])*t17;
+ (*this)[1][0] = -((*this)[1][0] * (*this)[2][2] - (*this)[1][2] * (*this)[2][0])*t17;
+ (*this)[1][1] = (matrix00 * (*this)[2][2] - t14) * t17;
+ (*this)[1][2] = -(t6-t10) * t17;
+ (*this)[2][0] = (matrix10 * (*this)[2][1] - matrix11 * (*this)[2][0]) * t17;
+ (*this)[2][1] = -(matrix00 * (*this)[2][1] - t12) * t17;
+ (*this)[2][2] = (t4-t8) * t17;
+ }
+ else {
+ using std::swap;
+
+ AutonomousValue<MAT> A(asImp());
+ std::vector<simd_index_type> pivot(rows());
+ Simd::Mask<typename FieldTraits<value_type>::real_type>
+ nonsingularLanes(true);
+ AutonomousValue<MAT>::luDecomposition(A, ElimPivot(pivot), nonsingularLanes, true, doPivoting);
+ auto& L=A;
+ auto& U=A;
+
+ // initialize inverse
+ *this=field_type();
+
+ for(size_type i=0; i<rows(); ++i)
+ (*this)[i][i]=1;
+
+ // L Y = I; multiple right hand sides
+ for (size_type i=0; i<rows(); i++)
+ for (size_type j=0; j<i; j++)
+ for (size_type k=0; k<rows(); k++)
+ (*this)[i][k] -= L[i][j]*(*this)[j][k];
+
+ // U A^{-1} = Y
+ for (size_type i=rows(); i>0;) {
+ --i;
+ for (size_type k=0; k<rows(); k++) {
+ for (size_type j=i+1; j<rows(); j++)
+ (*this)[i][k] -= U[i][j]*(*this)[j][k];
+ (*this)[i][k] /= U[i][i];
+ }
+ }
+
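+      // undo the row pivoting of the decomposition by applying the
+      // corresponding column swaps to the computed inverse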
+ for(size_type i=rows(); i>0; ) {
+ --i;
+ for(std::size_t l = 0; l < Simd::lanes((*this)[0][0]); ++l)
+ {
+ std::size_t pi = Simd::lane(l, pivot[i]);
+ if(i!=pi)
+ for(size_type j=0; j<rows(); ++j)
+ swap(Simd::lane(l, (*this)[j][pi]),
+ Simd::lane(l, (*this)[j][ i]));
+ }
+ }
+ }
+ }
+
+ // implementation of the determinant
+ template<typename MAT>
+ inline typename DenseMatrix<MAT>::field_type
+ DenseMatrix<MAT>::determinant(bool doPivoting) const
+ {
+ // never mind those ifs, because they get optimized away
+ if (rows()!=cols())
+ DUNE_THROW(FMatrixError, "There is no determinant for a " << rows() << "x" << cols() << " matrix!");
+
+ if (rows()==1)
+ return (*this)[0][0];
+
+ if (rows()==2)
+ return (*this)[0][0]*(*this)[1][1] - (*this)[0][1]*(*this)[1][0];
+
+ if (rows()==3) {
+ // code generated by maple
+ field_type t4 = (*this)[0][0] * (*this)[1][1];
+ field_type t6 = (*this)[0][0] * (*this)[1][2];
+ field_type t8 = (*this)[0][1] * (*this)[1][0];
+ field_type t10 = (*this)[0][2] * (*this)[1][0];
+ field_type t12 = (*this)[0][1] * (*this)[2][0];
+ field_type t14 = (*this)[0][2] * (*this)[2][0];
+
+ return (t4*(*this)[2][2]-t6*(*this)[2][1]-t8*(*this)[2][2]+
+ t10*(*this)[2][1]+t12*(*this)[1][2]-t14*(*this)[1][1]);
+
+ }
+
+ AutonomousValue<MAT> A(asImp());
+ field_type det;
+ Simd::Mask<typename FieldTraits<value_type>::real_type>
+ nonsingularLanes(true);
+
+ AutonomousValue<MAT>::luDecomposition(A, ElimDet(det), nonsingularLanes, false, doPivoting);
+ det = Simd::cond(nonsingularLanes, det, field_type(0));
+
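+    // det now holds the sign (+1 or -1) picked up from row swaps, zeroed for singular lanes;
+    // multiplying by the diagonal of U yields the determinant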
+ for (size_type i = 0; i < rows(); ++i)
+ det *= A[i][i];
+ return det;
+ }
+
+#endif // DOXYGEN
+
+ namespace DenseMatrixHelp {
+
+ //! calculates ret = matrix * x
+ template <typename MAT, typename V1, typename V2>
+ static inline void multAssign(const DenseMatrix<MAT> &matrix, const DenseVector<V1> & x, DenseVector<V2> & ret)
+ {
+ DUNE_ASSERT_BOUNDS(x.size() == matrix.cols());
+ DUNE_ASSERT_BOUNDS(ret.size() == matrix.rows());
+ typedef typename DenseMatrix<MAT>::size_type size_type;
+
+ for(size_type i=0; i<matrix.rows(); ++i)
+ {
+ ret[i] = 0.0;
+ for(size_type j=0; j<matrix.cols(); ++j)
+ {
+ ret[i] += matrix[i][j]*x[j];
+ }
+ }
+ }
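+    // A usage sketch (illustrative only; any DenseMatrix/DenseVector pair with matching sizes works):
+    //   Dune::FieldMatrix<double,2,3> A = {{1,2,3},{4,5,6}};
+    //   Dune::FieldVector<double,3>   x = {1,1,1};
+    //   Dune::FieldVector<double,2>   y;
+    //   Dune::DenseMatrixHelp::multAssign(A, x, y);   // y = A*x = (6, 15)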
+
+#if 0
+ //! calculates ret = matrix^T * x
+ template <typename K, int rows, int cols>
+ static inline void multAssignTransposed( const FieldMatrix<K,rows,cols> &matrix, const FieldVector<K,rows> & x, FieldVector<K,cols> & ret)
+ {
+ typedef typename FieldMatrix<K,rows,cols>::size_type size_type;
+
+ for(size_type i=0; i<cols(); ++i)
+ {
+ ret[i] = 0.0;
+ for(size_type j=0; j<rows(); ++j)
+ ret[i] += matrix[j][i]*x[j];
+ }
+ }
+
+ //! calculates ret = matrix * x
+ template <typename K, int rows, int cols>
+ static inline FieldVector<K,rows> mult(const FieldMatrix<K,rows,cols> &matrix, const FieldVector<K,cols> & x)
+ {
+ FieldVector<K,rows> ret;
+ multAssign(matrix,x,ret);
+ return ret;
+ }
+
+ //! calculates ret = matrix^T * x
+ template <typename K, int rows, int cols>
+ static inline FieldVector<K,cols> multTransposed(const FieldMatrix<K,rows,cols> &matrix, const FieldVector<K,rows> & x)
+ {
+ FieldVector<K,cols> ret;
+ multAssignTransposed( matrix, x, ret );
+ return ret;
+ }
+#endif
+
+ } // end namespace DenseMatrixHelp
+
+ /** \brief Sends the matrix to an output stream */
+ template<typename MAT>
+ std::ostream& operator<< (std::ostream& s, const DenseMatrix<MAT>& a)
+ {
+ for (typename DenseMatrix<MAT>::size_type i=0; i<a.rows(); i++)
+ s << a[i] << std::endl;
+ return s;
+ }
+
+ /** @} end documentation */
+
+} // end namespace Dune
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_DENSEVECTOR_HH
+#define DUNE_DENSEVECTOR_HH
+
+#include <algorithm>
+#include <limits>
+#include <type_traits>
+
+#include "genericiterator.hh"
+#include "ftraits.hh"
+#include "matvectraits.hh"
+#include "promotiontraits.hh"
+#include "dotproduct.hh"
+#include "boundschecking.hh"
+
+namespace Dune {
+
+ // forward declaration of template
+ template<typename V> class DenseVector;
+
+ template<typename V>
+ struct FieldTraits< DenseVector<V> >
+ {
+ typedef typename FieldTraits< typename DenseMatVecTraits<V>::value_type >::field_type field_type;
+ typedef typename FieldTraits< typename DenseMatVecTraits<V>::value_type >::real_type real_type;
+ };
+
+ /** @defgroup DenseMatVec Dense Matrix and Vector Template Library
+ @ingroup Common
+ @{
+ */
+
+ /*! \file
+ * \brief Implements the dense vector interface, with an exchangeable storage class
+ */
+
+ namespace fvmeta
+ {
+ /**
+ \private
+ \memberof Dune::DenseVector
+ */
+ template<class K>
+ inline typename FieldTraits<K>::real_type absreal (const K& k)
+ {
+ using std::abs;
+ return abs(k);
+ }
+
+ /**
+ \private
+ \memberof Dune::DenseVector
+ */
+ template<class K>
+ inline typename FieldTraits<K>::real_type absreal (const std::complex<K>& c)
+ {
+ using std::abs;
+ return abs(c.real()) + abs(c.imag());
+ }
+
+ /**
+ \private
+ \memberof Dune::DenseVector
+ */
+ template<class K>
+ inline typename FieldTraits<K>::real_type abs2 (const K& k)
+ {
+ return k*k;
+ }
+
+ /**
+ \private
+ \memberof Dune::DenseVector
+ */
+ template<class K>
+ inline typename FieldTraits<K>::real_type abs2 (const std::complex<K>& c)
+ {
+ return c.real()*c.real() + c.imag()*c.imag();
+ }
+
+ /**
+ \private
+ \memberof Dune::DenseVector
+ */
+ template<class K, bool isInteger = std::numeric_limits<K>::is_integer>
+ struct Sqrt
+ {
+ static inline typename FieldTraits<K>::real_type sqrt (const K& k)
+ {
+ using std::sqrt;
+ return sqrt(k);
+ }
+ };
+
+ /**
+ \private
+ \memberof Dune::DenseVector
+ */
+ template<class K>
+ struct Sqrt<K, true>
+ {
+ static inline typename FieldTraits<K>::real_type sqrt (const K& k)
+ {
+ using std::sqrt;
+ return typename FieldTraits<K>::real_type(sqrt(double(k)));
+ }
+ };
+
+ /**
+ \private
+ \memberof Dune::DenseVector
+ */
+ template<class K>
+ inline typename FieldTraits<K>::real_type sqrt (const K& k)
+ {
+ return Sqrt<K>::sqrt(k);
+ }
+
+ }
+
+ /*! \brief Generic iterator class for dense vector and matrix implementations
+
+ provides sequential access to DenseVector, FieldVector and FieldMatrix
+ */
+ template<class C, class T, class R =T&>
+ class DenseIterator :
+ public Dune::RandomAccessIteratorFacade<DenseIterator<C,T,R>,T, R, std::ptrdiff_t>
+ {
+ friend class DenseIterator<typename std::remove_const<C>::type, typename std::remove_const<T>::type, typename mutable_reference<R>::type >;
+ friend class DenseIterator<const typename std::remove_const<C>::type, const typename std::remove_const<T>::type, typename const_reference<R>::type >;
+
+ typedef DenseIterator<typename std::remove_const<C>::type, typename std::remove_const<T>::type, typename mutable_reference<R>::type > MutableIterator;
+ typedef DenseIterator<const typename std::remove_const<C>::type, const typename std::remove_const<T>::type, typename const_reference<R>::type > ConstIterator;
+ public:
+
+ /**
+ * @brief The type of the difference between two positions.
+ */
+ typedef std::ptrdiff_t DifferenceType;
+
+ /**
+ * @brief The type to index the underlying container.
+ */
+ typedef typename C::size_type SizeType;
+
+ // Constructors needed by the base iterators.
+ DenseIterator()
+ : container_(0), position_()
+ {}
+
+ DenseIterator(C& cont, SizeType pos)
+ : container_(&cont), position_(pos)
+ {}
+
+ DenseIterator(const MutableIterator & other)
+ : container_(other.container_), position_(other.position_)
+ {}
+
+ DenseIterator(const ConstIterator & other)
+ : container_(other.container_), position_(other.position_)
+ {}
+
+ // Methods needed by the forward iterator
+ bool equals(const MutableIterator &other) const
+ {
+ return position_ == other.position_ && container_ == other.container_;
+ }
+
+
+ bool equals(const ConstIterator & other) const
+ {
+ return position_ == other.position_ && container_ == other.container_;
+ }
+
+ R dereference() const {
+ return container_->operator[](position_);
+ }
+
+ void increment(){
+ ++position_;
+ }
+
+ // Additional function needed by BidirectionalIterator
+ void decrement(){
+ --position_;
+ }
+
+ // Additional function needed by RandomAccessIterator
+ R elementAt(DifferenceType i) const {
+ return container_->operator[](position_+i);
+ }
+
+ void advance(DifferenceType n){
+ position_=position_+n;
+ }
+
+ DifferenceType distanceTo(DenseIterator<const typename std::remove_const<C>::type,const typename std::remove_const<T>::type> other) const
+ {
+ assert(other.container_==container_);
+ return static_cast< DifferenceType >( other.position_ ) - static_cast< DifferenceType >( position_ );
+ }
+
+ DifferenceType distanceTo(DenseIterator<typename std::remove_const<C>::type, typename std::remove_const<T>::type> other) const
+ {
+ assert(other.container_==container_);
+ return static_cast< DifferenceType >( other.position_ ) - static_cast< DifferenceType >( position_ );
+ }
+
+ //! return index
+ SizeType index () const
+ {
+ return this->position_;
+ }
+
+ private:
+ C *container_;
+ SizeType position_;
+ };
+
+ /** \brief Interface for a class of dense vectors over a given field.
+ *
+ * \tparam V implementation class of the vector
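+   *
+   * A short usage sketch via the derived class FieldVector (illustrative only):
+   * \code
+   * Dune::FieldVector<double,3> x = {1.0, 2.0, 3.0};
+   * x *= 2.0;                     // scale every component
+   * auto norm = x.two_norm();     // sqrt(4 + 16 + 36)
+   * \endcode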
+ */
+ template<typename V>
+ class DenseVector
+ {
+ typedef DenseMatVecTraits<V> Traits;
+ // typedef typename Traits::value_type K;
+
+ // Curiously recurring template pattern
+ V & asImp() { return static_cast<V&>(*this); }
+ const V & asImp() const { return static_cast<const V&>(*this); }
+
+ protected:
+ // construction allowed to derived classes only
+ constexpr DenseVector() = default;
+ // copying only allowed by derived classes
+ DenseVector(const DenseVector&) = default;
+
+ public:
+ //===== type definitions and constants
+
+ //! type of derived vector class
+ typedef typename Traits::derived_type derived_type;
+
+ //! export the type representing the field
+ typedef typename Traits::value_type value_type;
+
+ //! export the type representing the field
+ typedef typename FieldTraits< value_type >::field_type field_type;
+
+ //! export the type representing the components
+ typedef typename Traits::value_type block_type;
+
+ //! The type used for the index access and size operation
+ typedef typename Traits::size_type size_type;
+
+ //! We are at the leaf of the block recursion
+ enum {
+ //! The number of block levels we contain
+ blocklevel = 1
+ };
+
+ //===== assignment from scalar
+ //! Assignment operator for scalar
+ inline derived_type& operator= (const value_type& k)
+ {
+ for (size_type i=0; i<size(); i++)
+ asImp()[i] = k;
+ return asImp();
+ }
+
+ //===== assignment from other DenseVectors
+ protected:
+ //! Assignment operator for other DenseVector of same type
+ DenseVector& operator=(const DenseVector&) = default;
+
+ public:
+
+ //! Assignment operator for other DenseVector of different type
+ template <typename W,
+ std::enable_if_t<
+ std::is_assignable<value_type&, typename DenseVector<W>::value_type>::value, int> = 0>
+ derived_type& operator= (const DenseVector<W>& other)
+ {
+ assert(other.size() == size());
+ for (size_type i=0; i<size(); i++)
+ asImp()[i] = other[i];
+ return asImp();
+ }
+
+ //===== access to components
+
+ //! random access
+ value_type & operator[] (size_type i)
+ {
+ return asImp()[i];
+ }
+
+ const value_type & operator[] (size_type i) const
+ {
+ return asImp()[i];
+ }
+
+ //! return reference to first element
+ value_type& front()
+ {
+ return asImp()[0];
+ }
+
+ //! return reference to first element
+ const value_type& front() const
+ {
+ return asImp()[0];
+ }
+
+ //! return reference to last element
+ value_type& back()
+ {
+ return asImp()[size()-1];
+ }
+
+ //! return reference to last element
+ const value_type& back() const
+ {
+ return asImp()[size()-1];
+ }
+
+ //! checks whether the container is empty
+ bool empty() const
+ {
+ return size() == 0;
+ }
+
+ //! size method
+ size_type size() const
+ {
+ return asImp().size();
+ }
+
+ //! Iterator class for sequential access
+ typedef DenseIterator<DenseVector,value_type> Iterator;
+ //! typedef for stl compliant access
+ typedef Iterator iterator;
+
+ //! begin iterator
+ Iterator begin ()
+ {
+ return Iterator(*this,0);
+ }
+
+ //! end iterator
+ Iterator end ()
+ {
+ return Iterator(*this,size());
+ }
+
+ //! @returns an iterator that is positioned before
+ //! the end iterator of the vector, i.e. at the last entry.
+ Iterator beforeEnd ()
+ {
+ return Iterator(*this,size()-1);
+ }
+
+ //! @returns an iterator that is positioned before
+ //! the first entry of the vector.
+ Iterator beforeBegin ()
+ {
+ return Iterator(*this,-1);
+ }
+
+ //! return iterator to given element or end()
+ Iterator find (size_type i)
+ {
+ return Iterator(*this,std::min(i,size()));
+ }
+
+ //! ConstIterator class for sequential access
+ typedef DenseIterator<const DenseVector,const value_type> ConstIterator;
+ //! typedef for stl compliant access
+ typedef ConstIterator const_iterator;
+
+ //! begin ConstIterator
+ ConstIterator begin () const
+ {
+ return ConstIterator(*this,0);
+ }
+
+ //! end ConstIterator
+ ConstIterator end () const
+ {
+ return ConstIterator(*this,size());
+ }
+
+ //! @returns an iterator that is positioned before
+    //! the end iterator of the vector, i.e. at the last element.
+ ConstIterator beforeEnd () const
+ {
+ return ConstIterator(*this,size()-1);
+ }
+
+ //! @returns an iterator that is positioned before
+ //! the first entry of the vector.
+ ConstIterator beforeBegin () const
+ {
+ return ConstIterator(*this,-1);
+ }
+
+ //! return iterator to given element or end()
+ ConstIterator find (size_type i) const
+ {
+ return ConstIterator(*this,std::min(i,size()));
+ }
+
+ //===== vector space arithmetic
+
+ //! vector space addition
+ template <class Other>
+ derived_type& operator+= (const DenseVector<Other>& x)
+ {
+ DUNE_ASSERT_BOUNDS(x.size() == size());
+ for (size_type i=0; i<size(); i++)
+ (*this)[i] += x[i];
+ return asImp();
+ }
+
+ //! vector space subtraction
+ template <class Other>
+ derived_type& operator-= (const DenseVector<Other>& x)
+ {
+ DUNE_ASSERT_BOUNDS(x.size() == size());
+ for (size_type i=0; i<size(); i++)
+ (*this)[i] -= x[i];
+ return asImp();
+ }
+
+ //! Binary vector addition
+ template <class Other>
+ derived_type operator+ (const DenseVector<Other>& b) const
+ {
+ derived_type z = asImp();
+ return (z+=b);
+ }
+
+ //! Binary vector subtraction
+ template <class Other>
+ derived_type operator- (const DenseVector<Other>& b) const
+ {
+ derived_type z = asImp();
+ return (z-=b);
+ }
+
+ //! Vector negation
+ derived_type operator- () const
+ {
+ V result;
+ using idx_type = typename decltype(result)::size_type;
+
+ for (idx_type i = 0; i < size(); ++i)
+ result[i] = -asImp()[i];
+
+ return result;
+ }
+
+ //! \brief vector space add scalar to all comps
+ /**
+ we use enable_if to avoid an ambiguity, if the
+ function parameter can be converted to value_type implicitly.
+ (see FS#1457)
+
+ The function is only enabled, if the parameter is directly
+ convertible to value_type.
+ */
+ template <typename ValueType>
+ typename std::enable_if<
+ std::is_convertible<ValueType, value_type>::value,
+ derived_type
+ >::type&
+ operator+= (const ValueType& kk)
+ {
+ const value_type& k = kk;
+ for (size_type i=0; i<size(); i++)
+ (*this)[i] += k;
+ return asImp();
+ }
+
+ //! \brief vector space subtract scalar from all comps
+ /**
+ we use enable_if to avoid an ambiguity, if the
+ function parameter can be converted to value_type implicitly.
+ (see FS#1457)
+
+ The function is only enabled, if the parameter is directly
+ convertible to value_type.
+ */
+ template <typename ValueType>
+ typename std::enable_if<
+ std::is_convertible<ValueType, value_type>::value,
+ derived_type
+ >::type&
+ operator-= (const ValueType& kk)
+ {
+ const value_type& k = kk;
+ for (size_type i=0; i<size(); i++)
+ (*this)[i] -= k;
+ return asImp();
+ }
+
+ //! \brief vector space multiplication with scalar
+ /**
+ we use enable_if to avoid an ambiguity, if the
+ function parameter can be converted to field_type implicitly.
+ (see FS#1457)
+
+ The function is only enabled, if the parameter is directly
+ convertible to field_type.
+ */
+ template <typename FieldType>
+ typename std::enable_if<
+ std::is_convertible<FieldType, field_type>::value,
+ derived_type
+ >::type&
+ operator*= (const FieldType& kk)
+ {
+ const field_type& k = kk;
+ for (size_type i=0; i<size(); i++)
+ (*this)[i] *= k;
+ return asImp();
+ }
+
+ //! \brief vector space division by scalar
+ /**
+ we use enable_if to avoid an ambiguity, if the
+ function parameter can be converted to field_type implicitly.
+ (see FS#1457)
+
+ The function is only enabled, if the parameter is directly
+ convertible to field_type.
+ */
+ template <typename FieldType>
+ typename std::enable_if<
+ std::is_convertible<FieldType, field_type>::value,
+ derived_type
+ >::type&
+ operator/= (const FieldType& kk)
+ {
+ const field_type& k = kk;
+ for (size_type i=0; i<size(); i++)
+ (*this)[i] /= k;
+ return asImp();
+ }
+
+ //! Binary vector comparison
+ template <class Other>
+ bool operator== (const DenseVector<Other>& x) const
+ {
+ DUNE_ASSERT_BOUNDS(x.size() == size());
+ for (size_type i=0; i<size(); i++)
+ if ((*this)[i]!=x[i])
+ return false;
+
+ return true;
+ }
+
+    //! Binary vector inequality comparison
+ template <class Other>
+ bool operator!= (const DenseVector<Other>& x) const
+ {
+ return !operator==(x);
+ }
+
+
+ //! vector space axpy operation ( *this += a x )
+ template <class Other>
+ derived_type& axpy (const field_type& a, const DenseVector<Other>& x)
+ {
+ DUNE_ASSERT_BOUNDS(x.size() == size());
+ for (size_type i=0; i<size(); i++)
+ (*this)[i] += a*x[i];
+ return asImp();
+ }
+
+ /**
+ * \brief indefinite vector dot product \f$\left (x^T \cdot y \right)\f$ which corresponds to Petsc's VecTDot
+ *
+ * http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Vec/VecTDot.html
+ * @param x other vector
+ * @return
+ */
+ template<class Other>
+ typename PromotionTraits<field_type,typename DenseVector<Other>::field_type>::PromotedType operator* (const DenseVector<Other>& x) const {
+ typedef typename PromotionTraits<field_type, typename DenseVector<Other>::field_type>::PromotedType PromotedType;
+ PromotedType result(0);
+ assert(x.size() == size());
+ for (size_type i=0; i<size(); i++) {
+ result += PromotedType((*this)[i]*x[i]);
+ }
+ return result;
+ }
+
+ /**
+ * @brief vector dot product \f$\left (x^H \cdot y \right)\f$ which corresponds to Petsc's VecDot
+ *
+ * http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Vec/VecDot.html
+ * @param x other vector
+ * @return
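+     *
+     * For complex-valued entries the left argument is conjugated: e.g. for
+     * x = y = (i) this returns 1, whereas the indefinite product x*y returns -1
+     * (illustrative sketch).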
+ */
+ template<class Other>
+ typename PromotionTraits<field_type,typename DenseVector<Other>::field_type>::PromotedType dot(const DenseVector<Other>& x) const {
+ typedef typename PromotionTraits<field_type, typename DenseVector<Other>::field_type>::PromotedType PromotedType;
+ PromotedType result(0);
+ assert(x.size() == size());
+ for (size_type i=0; i<size(); i++) {
+ result += Dune::dot((*this)[i],x[i]);
+ }
+ return result;
+ }
+
+ //===== norms
+
+ //! one norm (sum over absolute values of entries)
+ typename FieldTraits<value_type>::real_type one_norm() const {
+ using std::abs;
+ typename FieldTraits<value_type>::real_type result( 0 );
+ for (size_type i=0; i<size(); i++)
+ result += abs((*this)[i]);
+ return result;
+ }
+
+
+ //! simplified one norm (uses Manhattan norm for complex values)
+ typename FieldTraits<value_type>::real_type one_norm_real () const
+ {
+ typename FieldTraits<value_type>::real_type result( 0 );
+ for (size_type i=0; i<size(); i++)
+ result += fvmeta::absreal((*this)[i]);
+ return result;
+ }
+
+ //! two norm sqrt(sum over squared values of entries)
+ typename FieldTraits<value_type>::real_type two_norm () const
+ {
+ typename FieldTraits<value_type>::real_type result( 0 );
+ for (size_type i=0; i<size(); i++)
+ result += fvmeta::abs2((*this)[i]);
+ return fvmeta::sqrt(result);
+ }
+
+    //! square of two norm (sum over squared values of entries), needed for block recursion
+ typename FieldTraits<value_type>::real_type two_norm2 () const
+ {
+ typename FieldTraits<value_type>::real_type result( 0 );
+ for (size_type i=0; i<size(); i++)
+ result += fvmeta::abs2((*this)[i]);
+ return result;
+ }
+
+ //! infinity norm (maximum of absolute values of entries)
+ template <typename vt = value_type,
+ typename std::enable_if<!HasNaN<vt>::value, int>::type = 0>
+ typename FieldTraits<vt>::real_type infinity_norm() const {
+ using real_type = typename FieldTraits<vt>::real_type;
+ using std::abs;
+ using std::max;
+
+ real_type norm = 0;
+ for (auto const &x : *this) {
+ real_type const a = abs(x);
+ norm = max(a, norm);
+ }
+ return norm;
+ }
+
+ //! simplified infinity norm (uses Manhattan norm for complex values)
+ template <typename vt = value_type,
+ typename std::enable_if<!HasNaN<vt>::value, int>::type = 0>
+ typename FieldTraits<vt>::real_type infinity_norm_real() const {
+ using real_type = typename FieldTraits<vt>::real_type;
+ using std::max;
+
+ real_type norm = 0;
+ for (auto const &x : *this) {
+ real_type const a = fvmeta::absreal(x);
+ norm = max(a, norm);
+ }
+ return norm;
+ }
+
+ //! infinity norm (maximum of absolute values of entries)
+ template <typename vt = value_type,
+ typename std::enable_if<HasNaN<vt>::value, int>::type = 0>
+ typename FieldTraits<vt>::real_type infinity_norm() const {
+ using real_type = typename FieldTraits<vt>::real_type;
+ using std::abs;
+ using std::max;
+
+ real_type norm = 0;
+ real_type isNaN = 1;
+ for (auto const &x : *this) {
+ real_type const a = abs(x);
+ norm = max(a, norm);
+ isNaN += a;
+ }
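+      // a NaN entry makes isNaN NaN, so the factor isNaN/isNaN propagates NaN into the result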
+ return norm * (isNaN / isNaN);
+ }
+
+ //! simplified infinity norm (uses Manhattan norm for complex values)
+ template <typename vt = value_type,
+ typename std::enable_if<HasNaN<vt>::value, int>::type = 0>
+ typename FieldTraits<vt>::real_type infinity_norm_real() const {
+ using real_type = typename FieldTraits<vt>::real_type;
+ using std::max;
+
+ real_type norm = 0;
+ real_type isNaN = 1;
+ for (auto const &x : *this) {
+ real_type const a = fvmeta::absreal(x);
+ norm = max(a, norm);
+ isNaN += a;
+ }
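+      // same NaN-propagation trick as in infinity_norm above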
+ return norm * (isNaN / isNaN);
+ }
+
+ //===== sizes
+
+    //! number of blocks in the vector (which are of size 1 here)
+ size_type N () const
+ {
+ return size();
+ }
+
+ //! dimension of the vector space
+ size_type dim () const
+ {
+ return size();
+ }
+
+ };
+
+ /** \brief Write a DenseVector to an output stream
+ * \relates DenseVector
+ *
+   * \param[in] s std::ostream to write to
+ * \param[in] v DenseVector to write
+ *
+ * \returns the output stream (s)
+ */
+ template<typename V>
+ std::ostream& operator<< (std::ostream& s, const DenseVector<V>& v)
+ {
+ for (typename DenseVector<V>::size_type i=0; i<v.size(); i++)
+ s << ((i>0) ? " " : "") << v[i];
+ return s;
+ }
+
+ /** @} end documentation */
+
+} // end namespace
+
+#endif // DUNE_DENSEVECTOR_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_DEPRECATED_HH
+#define DUNE_DEPRECATED_HH
+
+/** \file
+ * \brief Definition of the DUNE_DEPRECATED macro for the case that config.h
+ * is not available
+ */
+
+//! @addtogroup CxxUtilities
+//! @{
+#if defined(DOXYGEN) || !defined(HAS_ATTRIBUTE_DEPRECATED)
+//! Mark some entity as deprecated
+/**
+ * \deprecated Use C++14's \code[[deprecated]]\endcode instead. It will be
+ * removed after Dune 2.8. Be aware that it sometimes must be placed at a
+ * different position in the code.
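+ *
+ * Usage sketch (illustrative):
+ * \code
+ * void oldFunction() DUNE_DEPRECATED;
+ * \endcode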
+ */
+#define DUNE_DEPRECATED
+#else // defined(HAS_ATTRIBUTE_DEPRECATED)
+#define DUNE_DEPRECATED __attribute__((deprecated))
+#endif
+
+#if defined(DOXYGEN) || !defined(HAS_ATTRIBUTE_DEPRECATED_MSG)
+//! Mark some entity as deprecated
+/**
+ * \deprecated Use C++14's \code[[deprecated(msg)]]\endcode instead. It
+ * will be removed after Dune 2.8. Be aware that it sometimes must be
+ * placed at a different position in the code.
+ */
+#define DUNE_DEPRECATED_MSG(text) DUNE_DEPRECATED
+#else // defined(HAS_ATTRIBUTE_DEPRECATED_MSG)
+#define DUNE_DEPRECATED_MSG(text) __attribute__((deprecated(# text)))
+#endif
+
+#ifdef DOXYGEN
+/**
+ * \brief Ignore deprecation warnings (start)
+ *
+ * This macro can be used together with `DUNE_NO_DEPRECATED_END` to mark a
+ * block in which deprecation warnings are ignored. This can be useful for
+ * implementations of deprecated methods that call other deprecated methods
+ * or for testing deprecated methods in the testsuite.
+ *
+ * \code
+ DUNE_NO_DEPRECATED_BEGIN
+ some_deprecated_function();
+ another_deprecated_function();
+ DUNE_NO_DEPRECATED_END
+ * \endcode
+ *
+ * \warning This macro must always be used together with `DUNE_NO_DEPRECATED_END`
+ */
+#define DUNE_NO_DEPRECATED_BEGIN ...
+/**
+ * \brief Ignore deprecation warnings (end)
+ *
+ * \warning This macro must always be used together with `DUNE_NO_DEPRECATED_BEGIN`
+ */
+#define DUNE_NO_DEPRECATED_END ...
+#else
+# if defined __clang__
+# define DUNE_NO_DEPRECATED_BEGIN \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"")
+# define DUNE_NO_DEPRECATED_END _Pragma("clang diagnostic pop")
+# elif defined __INTEL_COMPILER
+# define DUNE_NO_DEPRECATED_BEGIN \
+ _Pragma("warning push") \
+ _Pragma("warning(disable:1478)") \
+ _Pragma("warning(disable:1786)")
+# define DUNE_NO_DEPRECATED_END _Pragma("warning pop")
+# elif defined __GNUC__
+# define DUNE_NO_DEPRECATED_BEGIN \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+# define DUNE_NO_DEPRECATED_END _Pragma("GCC diagnostic pop")
+# else
+# define DUNE_NO_DEPRECATED_BEGIN /* Noop. */
+# define DUNE_NO_DEPRECATED_END /* Noop. */
+# endif
+#endif
+
+//! @}
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_DIAGONAL_MATRIX_HH
+#define DUNE_DIAGONAL_MATRIX_HH
+
+/*! \file
+ \brief This file implements a quadratic diagonal matrix of fixed size.
+ */
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <complex>
+#include <cstddef>
+#include <initializer_list>
+#include <iostream>
+#include <memory>
+
+#include <dune/common/boundschecking.hh>
+#include <dune/common/densematrix.hh>
+#include <dune/common/exceptions.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/genericiterator.hh>
+#include <dune/common/typetraits.hh>
+
+
+namespace Dune {
+
+ template< class K, int n > class DiagonalRowVectorConst;
+ template< class K, int n > class DiagonalRowVector;
+ template< class DiagonalMatrixType > class DiagonalMatrixWrapper;
+ template< class C, class T, class R> class ContainerWrapperIterator;
+
+ /**
+ @addtogroup DenseMatVec
+ @{
+ */
+
+ /**
+ *@brief A diagonal matrix of static size.
+ *
+     * This is meant as a replacement for FieldMatrix in the case that the
+     * matrix is diagonal.
+ *
+ * \tparam K Type used for scalars
+ * \tparam n Matrix size
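+     *
+     * A usage sketch (illustrative only):
+     * \code
+     * Dune::DiagonalMatrix<double,3> D(2.0);   // diagonal (2,2,2)
+     * Dune::FieldVector<double,3> x = {1,2,3}, y;
+     * D.mv(x, y);                              // y = D*x = (2,4,6)
+     * \endcode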
+ */
+ template<class K, int n>
+ class DiagonalMatrix
+ {
+ typedef DiagonalMatrixWrapper< DiagonalMatrix<K,n> > WrapperType;
+
+ public:
+ //===== type definitions and constants
+
+ //! export the type representing the field
+ typedef K value_type;
+ typedef value_type field_type;
+
+ //! export the type representing the components
+ typedef K block_type;
+
+ //! The type used for the index access and size operations.
+ typedef std::size_t size_type;
+
+ //! We are at the leaf of the block recursion
+ enum {
+ //! The number of block levels we contain. This is 1.
+ blocklevel = 1
+ };
+
+ //! Each row is implemented by a field vector
+ typedef DiagonalRowVector<K,n> row_type;
+ typedef row_type reference;
+ typedef row_type row_reference;
+ typedef DiagonalRowVectorConst<K,n> const_row_type;
+ typedef const_row_type const_reference;
+ typedef const_row_type const_row_reference;
+
+ //! export size
+ enum {
+ //! The number of rows
+ rows = n,
+ //! The number of columns
+ cols = n
+ };
+
+ //==== size
+
+ static constexpr size_type size ()
+ {
+ return rows;
+ }
+
+ //===== constructors
+
+ //! Default constructor
+ constexpr DiagonalMatrix() = default;
+
+ //! Constructor initializing the whole matrix with a scalar
+ DiagonalMatrix (const K& k)
+ : diag_(k)
+ {}
+
+ //! Constructor initializing the diagonal with a vector
+ DiagonalMatrix (const FieldVector<K,n>& diag)
+ : diag_(diag)
+ {}
+
+ /** \brief Construct diagonal matrix from an initializer list
+ *
+ * The elements of the list are copied into the diagonal elements of the matrix.
+ * If the initializer list is shorter than the matrix diagonal (which has n elements),
+ * the remaining matrix diagonal elements are left uninitialized. If the initializer
+     * list is longer, then only the first n elements will be copied into the matrix
+ * diagonal.
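+     *
+     * Example (illustrative):
+     * \code
+     * DiagonalMatrix<double,3> D = {1.0, 2.0};  // third diagonal entry stays uninitialized
+     * \endcode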
+ */
+ DiagonalMatrix (std::initializer_list<K> const &l)
+ {
+ std::copy_n(l.begin(), std::min(static_cast<std::size_t>(rows),
+ l.size()),
+ diag_.begin());
+ }
+
+ /** \brief Assignment from a scalar */
+ DiagonalMatrix& operator= (const K& k)
+ {
+ diag_ = k;
+ return *this;
+ }
+
+ /** \brief Check if matrix is the same object as the other matrix */
+ bool identical(const DiagonalMatrix<K,n>& other) const
+ {
+ return (this==&other);
+ }
+
+ //===== iterator interface to rows of the matrix
+ //! Iterator class for sequential access
+ typedef ContainerWrapperIterator<const WrapperType, reference, reference> Iterator;
+ //! typedef for stl compliant access
+ typedef Iterator iterator;
+ //! rename the iterators for easier access
+ typedef Iterator RowIterator;
+ //! rename the iterators for easier access
+ typedef typename row_type::Iterator ColIterator;
+
+ //! begin iterator
+ Iterator begin ()
+ {
+ return Iterator(WrapperType(this),0);
+ }
+
+ //! end iterator
+ Iterator end ()
+ {
+ return Iterator(WrapperType(this),n);
+ }
+
+ //! @returns an iterator that is positioned before
+ //! the end iterator of the rows, i.e. at the last row.
+ Iterator beforeEnd ()
+ {
+ return Iterator(WrapperType(this),n-1);
+ }
+
+ //! @returns an iterator that is positioned before
+ //! the first row of the matrix.
+ Iterator beforeBegin ()
+ {
+ return Iterator(WrapperType(this),-1);
+ }
+
+
+ //! Iterator class for sequential access
+ typedef ContainerWrapperIterator<const WrapperType, const_reference, const_reference> ConstIterator;
+ //! typedef for stl compliant access
+ typedef ConstIterator const_iterator;
+ //! rename the iterators for easier access
+ typedef ConstIterator ConstRowIterator;
+ //! rename the iterators for easier access
+ typedef typename const_row_type::ConstIterator ConstColIterator;
+
+ //! begin iterator
+ ConstIterator begin () const
+ {
+ return ConstIterator(WrapperType(this),0);
+ }
+
+ //! end iterator
+ ConstIterator end () const
+ {
+ return ConstIterator(WrapperType(this),n);
+ }
+
+ //! @returns an iterator that is positioned before
+    //! the end iterator of the rows, i.e. at the last row.
+ ConstIterator beforeEnd() const
+ {
+ return ConstIterator(WrapperType(this),n-1);
+ }
+
+ //! @returns an iterator that is positioned before
+ //! the first row of the matrix.
+ ConstIterator beforeBegin () const
+ {
+ return ConstIterator(WrapperType(this),-1);
+ }
+
+
+
+ //===== vector space arithmetic
+
+ //! vector space addition
+ DiagonalMatrix& operator+= (const DiagonalMatrix& y)
+ {
+ diag_ += y.diag_;
+ return *this;
+ }
+
+ //! vector space subtraction
+ DiagonalMatrix& operator-= (const DiagonalMatrix& y)
+ {
+ diag_ -= y.diag_;
+ return *this;
+ }
+
+    //! addition of a scalar to all diagonal entries
+ DiagonalMatrix& operator+= (const K& k)
+ {
+ diag_ += k;
+ return *this;
+ }
+
+    //! subtraction of a scalar from all diagonal entries
+ DiagonalMatrix& operator-= (const K& k)
+ {
+ diag_ -= k;
+ return *this;
+ }
+
+ //! vector space multiplication with scalar
+ DiagonalMatrix& operator*= (const K& k)
+ {
+ diag_ *= k;
+ return *this;
+ }
+
+ //! vector space division by scalar
+ DiagonalMatrix& operator/= (const K& k)
+ {
+ diag_ /= k;
+ return *this;
+ }
+
+ //===== comparison ops
+
+ //! comparison operator
+ bool operator==(const DiagonalMatrix& other) const
+ {
+ return diag_==other.diagonal();
+ }
+
+    //! inequality comparison operator
+ bool operator!=(const DiagonalMatrix& other) const
+ {
+ return diag_!=other.diagonal();
+ }
+
+
+ //===== linear maps
+
+ //! y = A x
+ template<class X, class Y>
+ void mv (const X& x, Y& y) const
+ {
+#ifdef DUNE_FMatrix_WITH_CHECKING
+ if (x.N()!=M()) DUNE_THROW(FMatrixError,"index out of range");
+ if (y.N()!=N()) DUNE_THROW(FMatrixError,"index out of range");
+#endif
+ for (size_type i=0; i<n; ++i)
+ y[i] = diag_[i] * x[i];
+ }
+
+ //! y = A^T x
+ template<class X, class Y>
+ void mtv (const X& x, Y& y) const
+ {
+ mv(x, y);
+ }
+
+ //! y += A x
+ template<class X, class Y>
+ void umv (const X& x, Y& y) const
+ {
+#ifdef DUNE_FMatrix_WITH_CHECKING
+ if (x.N()!=M()) DUNE_THROW(FMatrixError,"index out of range");
+ if (y.N()!=N()) DUNE_THROW(FMatrixError,"index out of range");
+#endif
+ for (size_type i=0; i<n; ++i)
+ y[i] += diag_[i] * x[i];
+ }
+
+ //! y += A^T x
+ template<class X, class Y>
+ void umtv (const X& x, Y& y) const
+ {
+#ifdef DUNE_FMatrix_WITH_CHECKING
+ if (x.N()!=N()) DUNE_THROW(FMatrixError,"index out of range");
+ if (y.N()!=M()) DUNE_THROW(FMatrixError,"index out of range");
+#endif
+ for (size_type i=0; i<n; ++i)
+ y[i] += diag_[i] * x[i];
+ }
+
+ //! y += A^H x
+ template<class X, class Y>
+ void umhv (const X& x, Y& y) const
+ {
+#ifdef DUNE_FMatrix_WITH_CHECKING
+ if (x.N()!=N()) DUNE_THROW(FMatrixError,"index out of range");
+ if (y.N()!=M()) DUNE_THROW(FMatrixError,"index out of range");
+#endif
+ for (size_type i=0; i<n; i++)
+ y[i] += conjugateComplex(diag_[i])*x[i];
+ }
+
+ //! y -= A x
+ template<class X, class Y>
+ void mmv (const X& x, Y& y) const
+ {
+#ifdef DUNE_FMatrix_WITH_CHECKING
+ if (x.N()!=M()) DUNE_THROW(FMatrixError,"index out of range");
+ if (y.N()!=N()) DUNE_THROW(FMatrixError,"index out of range");
+#endif
+ for (size_type i=0; i<n; ++i)
+ y[i] -= diag_[i] * x[i];
+ }
+
+ //! y -= A^T x
+ template<class X, class Y>
+ void mmtv (const X& x, Y& y) const
+ {
+#ifdef DUNE_FMatrix_WITH_CHECKING
+ if (x.N()!=N()) DUNE_THROW(FMatrixError,"index out of range");
+ if (y.N()!=M()) DUNE_THROW(FMatrixError,"index out of range");
+#endif
+ for (size_type i=0; i<n; ++i)
+ y[i] -= diag_[i] * x[i];
+ }
+
+ //! y -= A^H x
+ template<class X, class Y>
+ void mmhv (const X& x, Y& y) const
+ {
+#ifdef DUNE_FMatrix_WITH_CHECKING
+ if (x.N()!=N()) DUNE_THROW(FMatrixError,"index out of range");
+ if (y.N()!=M()) DUNE_THROW(FMatrixError,"index out of range");
+#endif
+ for (size_type i=0; i<n; i++)
+ y[i] -= conjugateComplex(diag_[i])*x[i];
+ }
+
+ //! y += alpha A x
+ template<class X, class Y>
+ void usmv (const typename FieldTraits<Y>::field_type & alpha,
+ const X& x, Y& y) const
+ {
+#ifdef DUNE_FMatrix_WITH_CHECKING
+ if (x.N()!=M()) DUNE_THROW(FMatrixError,"index out of range");
+ if (y.N()!=N()) DUNE_THROW(FMatrixError,"index out of range");
+#endif
+ for (size_type i=0; i<n; i++)
+ y[i] += alpha * diag_[i] * x[i];
+ }
+
+ //! y += alpha A^T x
+ template<class X, class Y>
+ void usmtv (const typename FieldTraits<Y>::field_type & alpha,
+ const X& x, Y& y) const
+ {
+#ifdef DUNE_FMatrix_WITH_CHECKING
+ if (x.N()!=N()) DUNE_THROW(FMatrixError,"index out of range");
+ if (y.N()!=M()) DUNE_THROW(FMatrixError,"index out of range");
+#endif
+ for (size_type i=0; i<n; i++)
+ y[i] += alpha * diag_[i] * x[i];
+ }
+
+ //! y += alpha A^H x
+ template<class X, class Y>
+ void usmhv (const typename FieldTraits<Y>::field_type & alpha,
+ const X& x, Y& y) const
+ {
+#ifdef DUNE_FMatrix_WITH_CHECKING
+ if (x.N()!=N()) DUNE_THROW(FMatrixError,"index out of range");
+ if (y.N()!=M()) DUNE_THROW(FMatrixError,"index out of range");
+#endif
+ for (size_type i=0; i<n; i++)
+ y[i] += alpha * conjugateComplex(diag_[i]) * x[i];
+ }
+
+ //===== norms
+
+ //! frobenius norm: sqrt(sum over squared values of entries)
+ double frobenius_norm () const
+ {
+ return diag_.two_norm();
+ }
+
+    //! square of frobenius norm, needed for block recursion
+ double frobenius_norm2 () const
+ {
+ return diag_.two_norm2();
+ }
+
+ //! infinity norm (row sum norm, how to generalize for blocks?)
+ double infinity_norm () const
+ {
+ return diag_.infinity_norm();
+ }
+
+ //! simplified infinity norm (uses Manhattan norm for complex values)
+ double infinity_norm_real () const
+ {
+ return diag_.infinity_norm_real();
+ }
+
+
+
+ //===== solve
+
+ //! Solve system A x = b
+ template<class V>
+ void solve (V& x, const V& b) const
+ {
+ for (int i=0; i<n; i++)
+ x[i] = b[i]/diag_[i];
+ }
+
+ //! Compute inverse
+ void invert()
+ {
+ using real_type = typename FieldTraits<K>::real_type;
+ for (int i=0; i<n; i++)
+ diag_[i] = real_type(1.0)/diag_[i];
+ }
+
+ //! calculates the determinant of this matrix
+ K determinant () const
+ {
+ K det = diag_[0];
+ for (int i=1; i<n; i++)
+ det *= diag_[i];
+ return det;
+ }
+
+
+
+ //===== sizes
+
+ //! number of blocks in row direction
+ static constexpr size_type N ()
+ {
+ return n;
+ }
+
+ //! number of blocks in column direction
+ static constexpr size_type M ()
+ {
+ return n;
+ }
+
+
+
+ //===== query
+
+ //! return true when (i,j) is in pattern
+ bool exists (size_type i, size_type j) const
+ {
+ DUNE_ASSERT_BOUNDS(i >= 0 && i < n);
+ DUNE_ASSERT_BOUNDS(j >= 0 && j < n);
+ return i==j;
+ }
+
+
+
+ //! Sends the matrix to an output stream
+ friend std::ostream& operator<< (std::ostream& s, const DiagonalMatrix<K,n>& a)
+ {
+ for (size_type i=0; i<n; i++) {
+ for (size_type j=0; j<n; j++)
+ s << ((i==j) ? a.diag_[i] : 0) << " ";
+ s << std::endl;
+ }
+ return s;
+ }
+
+ //! Return reference object as row replacement
+ reference operator[](size_type i)
+ {
+ return reference(const_cast<K*>(&diag_[i]), i);
+ }
+
+ //! Return const_reference object as row replacement
+ const_reference operator[](size_type i) const
+ {
+ return const_reference(const_cast<K*>(&diag_[i]), i);
+ }
+
+ //! Get const reference to diagonal entry
+ const K& diagonal(size_type i) const
+ {
+ return diag_[i];
+ }
+
+ //! Get reference to diagonal entry
+ K& diagonal(size_type i)
+ {
+ return diag_[i];
+ }
+
+ //! Get const reference to diagonal vector
+ const FieldVector<K,n>& diagonal() const
+ {
+ return diag_;
+ }
+
+ //! Get reference to diagonal vector
+ FieldVector<K,n>& diagonal()
+ {
+ return diag_;
+ }
+
+ private:
+
+ // the data, a FieldVector storing the diagonal
+ FieldVector<K,n> diag_;
+ };
+
+ template< class K, int n >
+ struct FieldTraits< DiagonalMatrix<K,n> >
+ {
+ typedef typename FieldTraits<K>::field_type field_type;
+ typedef typename FieldTraits<K>::real_type real_type;
+ };
+
+
+#ifndef DOXYGEN // hide specialization
+ /** \brief Special type for 1x1 matrices
+ */
+ template< class K >
+ class DiagonalMatrix<K, 1> : public FieldMatrix<K, 1, 1>
+ {
+ typedef FieldMatrix<K,1,1> Base;
+ public:
+ //! The type used for index access and size operations
+ typedef typename Base::size_type size_type;
+
+ //! We are at the leaf of the block recursion
+ enum {
+ //! The number of block levels we contain.
+ //! This is always one for this type.
+ blocklevel = 1
+ };
+
+ typedef typename Base::row_type row_type;
+
+ typedef typename Base::row_reference row_reference;
+ typedef typename Base::const_row_reference const_row_reference;
+
+ //! export size
+ enum {
+ //! \brief The number of rows.
+ //! This is always one for this type.
+ rows = 1,
+ //! \brief The number of columns.
+ //! This is always one for this type.
+ cols = 1
+ };
+
+
+ //! Default Constructor
+ constexpr DiagonalMatrix() = default;
+
+ //! Constructor initializing the whole matrix with a scalar
+ DiagonalMatrix(const K& scalar)
+ {
+ (*this)[0][0] = scalar;
+ }
+
+ //! Get const reference to diagonal entry
+ const K& diagonal(size_type) const
+ {
+ return (*this)[0][0];
+ }
+
+ //! Get reference to diagonal entry
+ K& diagonal(size_type)
+ {
+ return (*this)[0][0];
+ }
+
+ //! Get const reference to diagonal vector
+ const FieldVector<K,1>& diagonal() const
+ {
+ return (*this)[0];
+ }
+
+ //! Get reference to diagonal vector
+ FieldVector<K,1>& diagonal()
+ {
+ return (*this)[0];
+ }
+
+ };
+#endif
+
+
+ template<class DiagonalMatrixType>
+ class DiagonalMatrixWrapper
+ {
+ typedef typename DiagonalMatrixType::reference reference;
+ typedef typename DiagonalMatrixType::const_reference const_reference;
+ typedef typename DiagonalMatrixType::field_type K;
+ typedef DiagonalRowVector<K, DiagonalMatrixType::rows> row_type;
+ typedef std::size_t size_type;
+ typedef DiagonalMatrixWrapper< DiagonalMatrixType> MyType;
+
+ friend class ContainerWrapperIterator<const MyType, reference, reference>;
+ friend class ContainerWrapperIterator<const MyType, const_reference, const_reference>;
+
+ public:
+
+ DiagonalMatrixWrapper() :
+ mat_(0)
+ {}
+
+ DiagonalMatrixWrapper(const DiagonalMatrixType* mat) :
+ mat_(const_cast<DiagonalMatrixType*>(mat))
+ {}
+
+ size_type realIndex(int i) const
+ {
+ return i;
+ }
+
+ row_type* pointer(int i) const
+ {
+ row_ = row_type(&(mat_->diagonal(i)), i);
+ return &row_;
+ }
+
+ bool identical(const DiagonalMatrixWrapper& other) const
+ {
+ return mat_==other.mat_;
+ }
+
+ private:
+
+ mutable DiagonalMatrixType* mat_;
+ mutable row_type row_;
+ };
+
+  /** \brief Read-only proxy class representing a single row of a DiagonalMatrix
+   *
+   *  It stores a pointer to the diagonal entry of the row and the row index;
+   *  only that diagonal entry of the row can be accessed.
+   */
+ template< class K, int n >
+ class DiagonalRowVectorConst
+ {
+ template<class DiagonalMatrixType>
+ friend class DiagonalMatrixWrapper;
+ friend class ContainerWrapperIterator<DiagonalRowVectorConst<K,n>, const K, const K&>;
+
+ public:
+ // remember size of vector
+ enum { dimension = n };
+
+ // standard constructor and everything is sufficient ...
+
+ //===== type definitions and constants
+
+ //! export the type representing the field
+ typedef K field_type;
+
+ //! export the type representing the components
+ typedef K block_type;
+
+ //! The type used for the index access and size operation
+ typedef std::size_t size_type;
+
+ //! We are at the leaf of the block recursion
+ enum {
+ //! The number of block levels we contain
+ blocklevel = 1
+ };
+
+ //! export size
+ enum {
+ //! The size of this vector.
+ size = n
+ };
+
+ //! Constructor making uninitialized vector
+ DiagonalRowVectorConst() :
+ p_(0),
+ row_(0)
+ {}
+
+    //! Constructor making a row proxy for the given diagonal entry and row index
+ explicit DiagonalRowVectorConst (K* p, int col) :
+ p_(p),
+ row_(col)
+ {}
+
+ //===== access to components
+
+ //! same for read only access
+ const K& operator[] ([[maybe_unused]] size_type i) const
+ {
+ DUNE_ASSERT_BOUNDS(i == row_);
+ return *p_;
+ }
+
+ // check if row is identical to other row (not only identical values)
+ // since this is a proxy class we need to check equality of the stored pointer
+ bool identical(const DiagonalRowVectorConst<K,n>& other) const
+ {
+      return (p_ == other.p_) and (row_ == other.row_);
+ }
+
+ //! ConstIterator class for sequential access
+ typedef ContainerWrapperIterator<DiagonalRowVectorConst<K,n>, const K, const K&> ConstIterator;
+ //! typedef for stl compliant access
+ typedef ConstIterator const_iterator;
+
+ //! begin ConstIterator
+ ConstIterator begin () const
+ {
+ return ConstIterator(*this,0);
+ }
+
+ //! end ConstIterator
+ ConstIterator end () const
+ {
+ return ConstIterator(*this,1);
+ }
+
+ //! @returns an iterator that is positioned before
+    //! the end iterator of the row, i.e. at its single entry.
+ ConstIterator beforeEnd() const
+ {
+ return ConstIterator(*this,0);
+ }
+
+ //! @returns an iterator that is positioned before
+ //! the first row of the matrix.
+ ConstIterator beforeBegin () const
+ {
+ return ConstIterator(*this,-1);
+ }
+
+ //! Binary vector comparison
+ bool operator== (const DiagonalRowVectorConst& y) const
+ {
+      return (p_==y.p_) and (row_==y.row_);
+ }
+
+ //===== sizes
+
+    //! number of blocks in the vector (which are of size 1 here)
+ size_type N () const
+ {
+ return n;
+ }
+
+ //! dimension of the vector space
+ size_type dim () const
+ {
+ return n;
+ }
+
+ //! index of this row in surrounding matrix
+ size_type rowIndex() const
+ {
+ return row_;
+ }
+
+ //! the diagonal value
+ const K& diagonal() const
+ {
+ return *p_;
+ }
+
+ protected:
+
+ size_type realIndex([[maybe_unused]] int i) const
+ {
+ return rowIndex();
+ }
+
+ K* pointer([[maybe_unused]] size_type i) const
+ {
+ return const_cast<K*>(p_);
+ }
+
+ DiagonalRowVectorConst* operator&()
+ {
+ return this;
+ }
+
+ // the data, very simply a pointer to the diagonal value and the row number
+ K* p_;
+ size_type row_;
+ };
+
+ template< class K, int n >
+ class DiagonalRowVector : public DiagonalRowVectorConst<K,n>
+ {
+ template<class DiagonalMatrixType>
+ friend class DiagonalMatrixWrapper;
+ friend class ContainerWrapperIterator<DiagonalRowVector<K,n>, K, K&>;
+
+ public:
+ // standard constructor and everything is sufficient ...
+
+ //===== type definitions and constants
+
+ //! export the type representing the field
+ typedef K field_type;
+
+ //! export the type representing the components
+ typedef K block_type;
+
+ //! The type used for the index access and size operation
+ typedef std::size_t size_type;
+
+ //! Constructor making uninitialized vector
+ DiagonalRowVector() : DiagonalRowVectorConst<K,n>()
+ {}
+
+    //! Constructor making a row proxy for the given diagonal entry and row index
+ explicit DiagonalRowVector (K* p, int col) : DiagonalRowVectorConst<K,n>(p, col)
+ {}
+
+ //===== assignment from scalar
+ //! Assignment operator for scalar
+ DiagonalRowVector& operator= (const K& k)
+ {
+ *p_ = k;
+ return *this;
+ }
+
+ //===== access to components
+
+ //! random access
+ K& operator[] ([[maybe_unused]] size_type i)
+ {
+ DUNE_ASSERT_BOUNDS(i == row_);
+ return *p_;
+ }
+
+ //! Iterator class for sequential access
+ typedef ContainerWrapperIterator<DiagonalRowVector<K,n>, K, K&> Iterator;
+ //! typedef for stl compliant access
+ typedef Iterator iterator;
+
+ //! begin iterator
+ Iterator begin ()
+ {
+ return Iterator(*this, 0);
+ }
+
+ //! end iterator
+ Iterator end ()
+ {
+ return Iterator(*this, 1);
+ }
+
+ //! @returns an iterator that is positioned before
+ //! the end iterator of the rows, i.e. at the last row.
+ Iterator beforeEnd ()
+ {
+ return Iterator(*this, 0);
+ }
+
+ //! @returns an iterator that is positioned before
+ //! the first row of the matrix.
+ Iterator beforeBegin ()
+ {
+ return Iterator(*this, -1);
+ }
+
+ //! ConstIterator class for sequential access
+ typedef ContainerWrapperIterator<DiagonalRowVectorConst<K,n>, const K, const K&> ConstIterator;
+ //! typedef for stl compliant access
+ typedef ConstIterator const_iterator;
+
+ using DiagonalRowVectorConst<K,n>::identical;
+ using DiagonalRowVectorConst<K,n>::operator[];
+ using DiagonalRowVectorConst<K,n>::operator==;
+ using DiagonalRowVectorConst<K,n>::begin;
+ using DiagonalRowVectorConst<K,n>::end;
+ using DiagonalRowVectorConst<K,n>::beforeEnd;
+ using DiagonalRowVectorConst<K,n>::beforeBegin;
+ using DiagonalRowVectorConst<K,n>::N;
+ using DiagonalRowVectorConst<K,n>::dim;
+ using DiagonalRowVectorConst<K,n>::rowIndex;
+ using DiagonalRowVectorConst<K,n>::diagonal;
+
+ protected:
+
+ DiagonalRowVector* operator&()
+ {
+ return this;
+ }
+
+ private:
+
+ using DiagonalRowVectorConst<K,n>::p_;
+ using DiagonalRowVectorConst<K,n>::row_;
+ };
+
+
+ // implement type traits
+ template<class K, int n>
+ struct const_reference< DiagonalRowVector<K,n> >
+ {
+ typedef DiagonalRowVectorConst<K,n> type;
+ };
+
+ template<class K, int n>
+ struct const_reference< DiagonalRowVectorConst<K,n> >
+ {
+ typedef DiagonalRowVectorConst<K,n> type;
+ };
+
+ template<class K, int n>
+ struct mutable_reference< DiagonalRowVector<K,n> >
+ {
+ typedef DiagonalRowVector<K,n> type;
+ };
+
+ template<class K, int n>
+ struct mutable_reference< DiagonalRowVectorConst<K,n> >
+ {
+ typedef DiagonalRowVector<K,n> type;
+ };
+
+
+
+ /** \brief Iterator class for sparse vector-like containers
+ *
+ * This class provides an iterator for sparse vector like containers.
+ * It contains a ContainerWrapper that must provide the translation
+ * from the position in the underlying container to the index
+ * in the sparse container.
+ *
+   * The ContainerWrapper must be default- and copy-constructible.
+ * Furthermore it must provide the methods:
+ *
+ * bool identical(other) - check if this is identical to other (same container, not only equal)
+ * T* pointer(position) - get pointer to data at position in underlying container
+ * size_t realIndex(position) - get index in sparse container for position in underlying container
+ *
+ * Notice that the iterator stores a ContainerWrapper.
+ * This allows one to use proxy classes as underlying container
+ * and as returned reference type.
+ *
+ * \tparam CW The container wrapper class
+ * \tparam T The contained type
+ * \tparam R The reference type returned by dereference
+ */
+ template<class CW, class T, class R>
+ class ContainerWrapperIterator : public BidirectionalIteratorFacade<ContainerWrapperIterator<CW,T,R>,T, R, int>
+ {
+ typedef typename std::remove_const<CW>::type NonConstCW;
+
+ friend class ContainerWrapperIterator<CW, typename mutable_reference<T>::type, typename mutable_reference<R>::type>;
+ friend class ContainerWrapperIterator<CW, typename const_reference<T>::type, typename const_reference<R>::type>;
+
+ typedef ContainerWrapperIterator<CW, typename mutable_reference<T>::type, typename mutable_reference<R>::type> MyType;
+ typedef ContainerWrapperIterator<CW, typename const_reference<T>::type, typename const_reference<R>::type> MyConstType;
+
+ public:
+
+ // Constructors needed by the facade iterators.
+ ContainerWrapperIterator() :
+ containerWrapper_(),
+ position_(0)
+ {}
+
+ ContainerWrapperIterator(CW containerWrapper, int position) :
+ containerWrapper_(containerWrapper),
+ position_(position)
+ {}
+
+ template<class OtherContainerWrapperIteratorType>
+ ContainerWrapperIterator(OtherContainerWrapperIteratorType& other) :
+ containerWrapper_(other.containerWrapper_),
+ position_(other.position_)
+ {}
+
+ ContainerWrapperIterator(const MyType& other) :
+ containerWrapper_(other.containerWrapper_),
+ position_(other.position_)
+ {}
+
+ ContainerWrapperIterator(const MyConstType& other) :
+ containerWrapper_(other.containerWrapper_),
+ position_(other.position_)
+ {}
+
+ template<class OtherContainerWrapperIteratorType>
+ ContainerWrapperIterator& operator=(OtherContainerWrapperIteratorType& other)
+ {
+ containerWrapper_ = other.containerWrapper_;
+ position_ = other.position_;
+ return *this;
+ }
+
+ // This operator is needed since we can not get the address of the
+ // temporary object returned by dereference
+ T* operator->() const
+ {
+ return containerWrapper_.pointer(position_);
+ }
+
+ // Methods needed by the forward iterator
+ bool equals(const MyType& other) const
+ {
+ return position_ == other.position_ && containerWrapper_.identical(other.containerWrapper_);
+ }
+
+ bool equals(const MyConstType& other) const
+ {
+ return position_ == other.position_ && containerWrapper_.identical(other.containerWrapper_);
+ }
+
+ R dereference() const
+ {
+ return *containerWrapper_.pointer(position_);
+ }
+
+ void increment()
+ {
+ ++position_;
+ }
+
+ // Additional function needed by BidirectionalIterator
+ void decrement()
+ {
+ --position_;
+ }
+
+ // Additional function needed by RandomAccessIterator
+ R elementAt(int i) const
+ {
+ return *containerWrapper_.pointer(position_+i);
+ }
+
+ void advance(int n)
+ {
+ position_=position_+n;
+ }
+
+ template<class OtherContainerWrapperIteratorType>
+ std::ptrdiff_t distanceTo(OtherContainerWrapperIteratorType& other) const
+ {
+      assert(containerWrapper_.identical(other.containerWrapper_));
+ return other.position_ - position_;
+ }
+
+ std::ptrdiff_t index() const
+ {
+ return containerWrapper_.realIndex(position_);
+ }
+
+ private:
+ NonConstCW containerWrapper_;
+ size_t position_;
+ };
+
+ template <class DenseMatrix, class field, int N>
+ struct DenseMatrixAssigner<DenseMatrix, DiagonalMatrix<field, N>> {
+ static void apply(DenseMatrix& denseMatrix,
+ DiagonalMatrix<field, N> const& rhs) {
+ DUNE_ASSERT_BOUNDS(denseMatrix.M() == N);
+ DUNE_ASSERT_BOUNDS(denseMatrix.N() == N);
+ denseMatrix = field(0);
+ for (int i = 0; i < N; ++i)
+ denseMatrix[i][i] = rhs.diagonal()[i];
+ }
+ };
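+
+  // With this assigner, a DiagonalMatrix can be copied into a dense matrix type,
+  // e.g. (illustrative sketch; assumes the target type accepts anything with a
+  // DenseMatrixAssigner, as FieldMatrix and DynamicMatrix do):
+  //   Dune::DiagonalMatrix<double,2> D(3.0);
+  //   Dune::FieldMatrix<double,2,2>  M = D;   // M == [[3,0],[0,3]]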
+ /* @} */
+} // end namespace
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_COMMON_DOCUMENTATION_HH
+#define DUNE_COMMON_DOCUMENTATION_HH
+
+/** \file
+ \brief Documentation related stuff
+ */
+
+namespace Dune {
+
+ /**
+ * \brief Dummy struct used for documentation purposes
+ *
+ * This struct can be used for documenting interfaces. One example would
+ * be:
+ * \code
+ * // Traits class that determines some property for some other type T
+ * template<class T>
+ * class SomeTraits {
+ * static_assert(Dune::AlwaysFalse<T>::value,
+ * "Sorry, SomeTraits must be specialized for all types");
+ * public:
+ * // The type of some property of T
+ * typedef ImplementationDefined type;
+ * };
+ * #ifndef DOXYGEN
+ * template<>
+ * struct SomeTraits<int>
+ * typedef ... type;
+ * };
+ * // ...
+ * #endif // DOXYGEN
+ * \endcode
+ *
+ * \sa implementationDefined
+ * \ingroup Common
+ */
+ struct ImplementationDefined {};
+
+ /**
+ * \brief Dummy integral value used for documentation purposes
+ *
+ * \var Dune::implementationDefined
+ * \code
+ * #include <dune/common/documentation.hh>
+ * \endcode
+ *
+ * \sa ImplementationDefined
+ * \ingroup Common
+ */
+ enum { implementationDefined };
+
+}
+
+
+#endif // DUNE_COMMON_DOCUMENTATION_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_DOTPRODUCT_HH
+#define DUNE_DOTPRODUCT_HH
+
+#include "ftraits.hh"
+#include "typetraits.hh"
+
+namespace Dune {
+ /**
+ * @file
+ * @brief Provides the functions dot(a,b) := \f$a^H \cdot b \f$ and dotT(a,b) := \f$a^T \cdot b \f$
+ *
+ * The provided dot products dot,dotT are used to compute (indefinite) dot products for fundamental types as well as DUNE vector types, such as DenseVector, FieldVector, ISTLVector.
+ * Note that the definition of dot(a,b) conjugates the first argument. This agrees with the behaviour of Matlab and Petsc, but not with BLAS.
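+   *
+   * A small illustration (sketch):
+   * \code
+   * std::complex<double> a(0.,1.), b(0.,1.);
+   * auto h = Dune::dot(a, b);    // conj(a)*b ==  1
+   * auto t = Dune::dotT(a, b);   // a*b       == -1
+   * \endcode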
+ * @author Jö Fahlke, Matthias Wohlmuth
+ */
+
+ /** @addtogroup Common
+ *
+ * @{
+ */
+
+ template<class T, class = void>
+ struct IsVector : std::false_type {};
+
+ template<class T>
+ struct IsVector<T, std::void_t<typename T::field_type> >
+ : std::true_type {};
+
+  /** @brief computes the dot product for fundamental data types according to Petsc's VecDot function: dot(a,b) := std::conj(a)*b
+ *
+ * @see http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Vec/VecDot.html#VecDot
+ * @param a
+ * @param b
+ * @return conj(a)*b
+ */
+ template<class A, class B>
+ auto
+ dot(const A & a, const B & b) -> typename std::enable_if<!IsVector<A>::value && !std::is_same<typename FieldTraits<A>::field_type,typename FieldTraits<A>::real_type> ::value, decltype(conj(a)*b)>::type
+ {
+ return conj(a)*b;
+ }
+
+ /**
+   * @brief computes the dot product for fundamental data types according to Petsc's VecDot function: dot(a,b) := std::conj(a)*b
+ *
+ * Specialization for real first arguments which replaces conj(a) by a.
+ * @see http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Vec/VecTDot.html#VecTDot
+ * @param a
+ * @param b
+ * @return a*b (which is the same as conj(a)*b in this case)
+ */
+ // fundamental type with A being a real type
+ template<class A, class B>
+ auto
+ dot(const A & a, const B & b) -> typename std::enable_if<!IsVector<A>::value && std::is_same<typename FieldTraits<A>::field_type,typename FieldTraits<A>::real_type>::value, decltype(a*b)>::type
+ {
+ return a*b;
+ }
+
+ /**
+   * @brief computes the dot product for various DUNE vector types according to Petsc's VecDot function: dot(a,b) := std::conj(a)*b
+   *
+   * Overload for vector types; it forwards to the member function a.dot(b).
+ * @see http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Vec/VecTDot.html#VecTDot
+ * @param a
+ * @param b
+ * @return dot(a,b)
+ */
+ template<typename A, typename B>
+ auto
+ dot(const A & a, const B & b) -> typename std::enable_if<IsVector<A>::value, decltype(a.dot(b))>::type
+ {
+ return a.dot(b);
+ }
+ /** @} */
+
+ /**
+   * @brief Computes an indefinite vector dot product for fundamental data types according to Petsc's VecTDot function: dotT(a,b) := a*b
+ * @see http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Vec/VecTDot.html#VecTDot
+ * @param a
+ * @param b
+ * @return a*b
+ */
+ template<class A, class B>
+ auto
+ dotT(const A & a, const B & b) -> decltype(a*b)
+ {
+ return a*b;
+ }
+
+ /** @} */
+} // end namespace Dune
+
+#endif // DUNE_DOTPRODUCT_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_DYNMATRIX_HH
+#define DUNE_DYNMATRIX_HH
+
+#include <cmath>
+#include <cstddef>
+#include <iostream>
+#include <initializer_list>
+
+#include <dune/common/boundschecking.hh>
+#include <dune/common/exceptions.hh>
+#include <dune/common/dynvector.hh>
+#include <dune/common/densematrix.hh>
+#include <dune/common/typetraits.hh>
+
+namespace Dune
+{
+
+ /**
+ @addtogroup DenseMatVec
+ @{
+ */
+
+ /*! \file
+ * \brief This file implements a dense matrix with dynamic numbers of rows and columns.
+ */
+
+ template< class K > class DynamicMatrix;
+
+ template< class K >
+ struct DenseMatVecTraits< DynamicMatrix<K> >
+ {
+ typedef DynamicMatrix<K> derived_type;
+
+ typedef DynamicVector<K> row_type;
+
+ typedef row_type &row_reference;
+ typedef const row_type &const_row_reference;
+
+ typedef std::vector<K> container_type;
+ typedef K value_type;
+ typedef typename container_type::size_type size_type;
+ };
+
+ template< class K >
+ struct FieldTraits< DynamicMatrix<K> >
+ {
+ typedef typename FieldTraits<K>::field_type field_type;
+ typedef typename FieldTraits<K>::real_type real_type;
+ };
+
+ /** \brief A dense matrix with a dynamic number of rows and columns.
+ *
+ * \tparam K is the field type (use float, double, complex, etc)
+ */
+ template<class K>
+ class DynamicMatrix : public DenseMatrix< DynamicMatrix<K> >
+ {
+ std::vector< DynamicVector<K> > _data;
+ typedef DenseMatrix< DynamicMatrix<K> > Base;
+ public:
+ typedef typename Base::size_type size_type;
+ typedef typename Base::value_type value_type;
+ typedef typename Base::row_type row_type;
+
+ //===== constructors
+ //! \brief Default constructor
+ DynamicMatrix () {}
+
+ //! \brief Constructor initializing the whole matrix with a scalar
+ DynamicMatrix (size_type r, size_type c, value_type v = value_type() ) :
+ _data(r, row_type(c, v) )
+ {}
+
+ /** \brief Constructor initializing the matrix from a list of vectors
+ */
+ DynamicMatrix (std::initializer_list<DynamicVector<K>> const &ll)
+ : _data(ll)
+ {}
+
+
+ template <class T,
+ typename = std::enable_if_t<!Dune::IsNumber<T>::value && HasDenseMatrixAssigner<DynamicMatrix, T>::value>>
+ DynamicMatrix(T const& rhs)
+ {
+ *this = rhs;
+ }
+
+ //==== resize related methods
+ /**
+ * \brief resize matrix to <code>r × c</code>
+ *
+ * Resize the matrix to <code>r × c</code>, using <code>v</code>
+ * as the value of all entries.
+ *
+ * \warning All previous entries are lost, even when the matrix
+ * was not actually resized.
+ *
+ * \param r number of rows
+ * \param c number of columns
+ * \param v value of matrix entries
+ */
+ void resize (size_type r, size_type c, value_type v = value_type() )
+ {
+ _data.resize(0);
+ _data.resize(r, row_type(c, v) );
+ }
+
+ //===== assignment
+ // General assignment with resizing
+ template <typename T,
+ typename = std::enable_if_t<!Dune::IsNumber<T>::value>>
+ DynamicMatrix& operator=(T const& rhs) {
+ _data.resize(rhs.N());
+ std::fill(_data.begin(), _data.end(), row_type(rhs.M(), K(0)));
+ Base::operator=(rhs);
+ return *this;
+ }
+
+ // Specialisation: scalar assignment (no resizing)
+ template <typename T,
+ typename = std::enable_if_t<Dune::IsNumber<T>::value>>
+ DynamicMatrix& operator=(T scalar) {
+ std::fill(_data.begin(), _data.end(), scalar);
+ return *this;
+ }
+
+ // make this thing a matrix
+ size_type mat_rows() const { return _data.size(); }
+ size_type mat_cols() const {
+ assert(this->rows());
+ return _data.front().size();
+ }
+ row_type & mat_access(size_type i) {
+ DUNE_ASSERT_BOUNDS(i < _data.size());
+ return _data[i];
+ }
+ const row_type & mat_access(size_type i) const {
+ DUNE_ASSERT_BOUNDS(i < _data.size());
+ return _data[i];
+ }
+ };
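+
+ // Usage sketch (illustrative, not part of the original interface):
+ //
+ //   Dune::DynamicMatrix<double> A(2, 3, 0.0);  // 2x3 matrix, all entries 0.0
+ //   A[0][1] = 4.2;                             // rows are DynamicVector<double>
+ //   A.resize(4, 4, 1.0);                       // previous entries are lost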
+
+ /** @} end documentation */
+
+} // end namespace
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_DYNMATRIXEIGENVALUES_HH
+#define DUNE_DYNMATRIXEIGENVALUES_HH
+
+#include <algorithm>
+#include <memory>
+
+#include "dynmatrix.hh"
+#include "fmatrixev.hh"
+
+/*!
+ \file
+ \brief utility functions to compute eigenvalues for
+ dense matrices.
+ \addtogroup DenseMatVec
+ @{
+ */
+
+namespace Dune {
+
+ namespace DynamicMatrixHelp {
+
+#if HAVE_LAPACK
+ using Dune::FMatrixHelp::eigenValuesNonsymLapackCall;
+#endif
+
+ /** \brief calculates the eigenvalues of a non-symmetric square matrix
+ \param[in] matrix matrix eigenvalues are calculated for
+ \param[out] eigenValues DynamicVector that receives the (in general complex)
+ eigenvalues, in no particular order
+ \param[out] eigenVectors (optional) list of right eigenvectors
+
+ \note LAPACK::dgeev is used to calculate the eigenvalues
+ */
+ template <typename K, class C>
+ static void eigenValuesNonSym(const DynamicMatrix<K>& matrix,
+ DynamicVector<C>& eigenValues,
+ std::vector<DynamicVector<K>>* eigenVectors = nullptr
+ )
+ {
+
+#if HAVE_LAPACK
+ {
+ const long int N = matrix.rows();
+ const char jobvl = 'n';
+ const char jobvr = eigenVectors ? 'v' : 'n';
+
+
+ // matrix to put into dgeev
+ auto matrixVector = std::make_unique<double[]>(N*N);
+
+ // copy matrix
+ int row = 0;
+ for(int i=0; i<N; ++i)
+ {
+ for(int j=0; j<N; ++j, ++row)
+ {
+ matrixVector[ row ] = matrix[ i ][ j ];
+ }
+ }
+
+ // working memory
+ auto eigenR = std::make_unique<double[]>(N);
+ auto eigenI = std::make_unique<double[]>(N);
+
+ const long int lwork = eigenVectors ? 4*N : 3*N;
+ auto work = std::make_unique<double[]>(lwork);
+ auto vr = eigenVectors ? std::make_unique<double[]>(N*N) : std::unique_ptr<double[]>{};
+
+ // return value information
+ long int info = 0;
+
+ // call LAPACK routine (see fmatrixev_ext.cc)
+ eigenValuesNonsymLapackCall(&jobvl, &jobvr, &N, matrixVector.get(), &N,
+ eigenR.get(), eigenI.get(), nullptr, &N, vr.get(), &N, work.get(),
+ &lwork, &info);
+
+ if( info != 0 )
+ {
+ std::cerr << "For matrix " << matrix << " eigenvalue calculation failed! " << std::endl;
+ DUNE_THROW(InvalidStateException,"eigenValues: Eigenvalue calculation failed!");
+ }
+
+ eigenValues.resize(N);
+ for (int i=0; i<N; ++i)
+ eigenValues[i] = std::complex<double>(eigenR[i], eigenI[i]);
+
+ if (eigenVectors) {
+ eigenVectors->resize(N);
+ for (int i = 0; i < N; ++i) {
+ auto& v = (*eigenVectors)[i];
+ v.resize(N);
+ std::copy(vr.get() + N*i, vr.get() + N*(i+1), &v[0]);
+ }
+ }
+ }
+#else // #if HAVE_LAPACK
+ DUNE_THROW(NotImplemented,"LAPACK not found!");
+#endif
+ }
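+
+ // Usage sketch (illustrative, only available when LAPACK was found at
+ // configure time; assumes <complex> is included):
+ //
+ //   Dune::DynamicMatrix<double> A(3, 3);
+ //   // ... fill A ...
+ //   Dune::DynamicVector<std::complex<double> > lambda;
+ //   Dune::DynamicMatrixHelp::eigenValuesNonSym(A, lambda);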
+ }
+
+}
+/** @} */
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_DYNVECTOR_HH
+#define DUNE_DYNVECTOR_HH
+
+#include <cmath>
+#include <cstddef>
+#include <cstdlib>
+#include <complex>
+#include <cstring>
+#include <initializer_list>
+#include <limits>
+#include <utility>
+
+#include "boundschecking.hh"
+#include "exceptions.hh"
+#include "genericiterator.hh"
+
+#include <vector>
+#include "densevector.hh"
+
+namespace Dune {
+
+ /** @addtogroup DenseMatVec
+ @{
+ */
+
+ /*! \file
+ * \brief This file implements a dense vector with a dynamic size.
+ */
+
+ template< class K, class Allocator > class DynamicVector;
+ template< class K, class Allocator >
+ struct DenseMatVecTraits< DynamicVector< K, Allocator > >
+ {
+ typedef DynamicVector< K, Allocator > derived_type;
+ typedef std::vector< K, Allocator > container_type;
+ typedef K value_type;
+ typedef typename container_type::size_type size_type;
+ };
+
+ template< class K, class Allocator >
+ struct FieldTraits< DynamicVector< K, Allocator > >
+ {
+ typedef typename FieldTraits< K >::field_type field_type;
+ typedef typename FieldTraits< K >::real_type real_type;
+ };
+
+ /** \brief A dense vector with a dynamic size.
+ *
+ * \tparam K is the field type (use float, double, complex, etc)
+ * \tparam Allocator type of allocator object used to define the storage allocation model,
+ * default Allocator = std::allocator< K >.
+ */
+ template< class K, class Allocator = std::allocator< K > >
+ class DynamicVector : public DenseVector< DynamicVector< K, Allocator > >
+ {
+ std::vector< K, Allocator > _data;
+
+ typedef DenseVector< DynamicVector< K, Allocator > > Base;
+ public:
+ typedef typename Base::size_type size_type;
+ typedef typename Base::value_type value_type;
+
+ typedef std::vector< K, Allocator > container_type;
+
+ typedef Allocator allocator_type;
+
+ //! Constructor making uninitialized vector
+ explicit DynamicVector(const allocator_type &a = allocator_type() ) :
+ _data( a )
+ {}
+
+ //! Constructor making vector with n value-initialized entries
+ explicit DynamicVector(size_type n, const allocator_type &a = allocator_type() ) :
+ _data( n, value_type(), a )
+ {}
+
+ //! Constructor making vector with identical coordinates
+ DynamicVector( size_type n, value_type c, const allocator_type &a = allocator_type() ) :
+ _data( n, c, a )
+ {}
+
+ /** \brief Construct from a std::initializer_list */
+ DynamicVector (std::initializer_list<K> const &l) :
+ _data(l)
+ {}
+
+ //! Copy constructor
+ DynamicVector(const DynamicVector & x) :
+ Base(), _data(x._data)
+ {}
+
+ //! Move constructor
+ DynamicVector(DynamicVector && x) :
+ _data(std::move(x._data))
+ {}
+
+ template< class T >
+ DynamicVector(const DynamicVector< T, Allocator > & x) :
+ _data(x.begin(), x.end(), x.get_allocator())
+ {}
+
+ //! Copy constructor from another DenseVector
+ template< class X >
+ DynamicVector(const DenseVector< X > & x, const allocator_type &a = allocator_type() ) :
+ _data(a)
+ {
+ const size_type n = x.size();
+ _data.reserve(n);
+ for( size_type i =0; i<n ;++i)
+ _data.push_back( x[ i ] );
+ }
+
+ using Base::operator=;
+
+ //! Copy assignment operator
+ DynamicVector &operator=(const DynamicVector &other)
+ {
+ _data = other._data;
+ return *this;
+ }
+
+ //! Move assignment operator
+ DynamicVector &operator=(DynamicVector &&other)
+ {
+ _data = std::move(other._data);
+ return *this;
+ }
+
+ //==== forward some methods of std::vector
+ /** \brief Number of elements for which memory has been allocated.
+
+ capacity() is always greater than or equal to size().
+ */
+ size_type capacity() const
+ {
+ return _data.capacity();
+ }
+ void resize (size_type n, value_type c = value_type() )
+ {
+ _data.resize(n,c);
+ }
+ void reserve (size_type n)
+ {
+ _data.reserve(n);
+ }
+
+ //==== make this thing a vector
+ size_type size() const { return _data.size(); }
+ K & operator[](size_type i) {
+ DUNE_ASSERT_BOUNDS(i < size());
+ return _data[i];
+ }
+ const K & operator[](size_type i) const {
+ DUNE_ASSERT_BOUNDS(i < size());
+ return _data[i];
+ }
+
+ //! return pointer to underlying array
+ K* data() noexcept
+ {
+ return _data.data();
+ }
+
+ //! return pointer to underlying array
+ const K* data() const noexcept
+ {
+ return _data.data();
+ }
+
+ const container_type &container () const { return _data; }
+ container_type &container () { return _data; }
+ };
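+
+ // Usage sketch (illustrative, not part of the original interface):
+ //
+ //   Dune::DynamicVector<double> v(3, 1.0);   // three entries, all 1.0
+ //   v.resize(5, 0.0);                        // grow to five entries, new ones 0.0
+ //   double n = v.two_norm();                 // Euclidean norm from the DenseVector base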
+
+ /** \brief Read a DynamicVector from an input stream
+ * \relates DynamicVector
+ *
+ * \note This operator is STL compliant, i.e., the content of v is only
+ * changed if the read operation is successful.
+ *
+ * \param[in] in std::istream to read from
+ * \param[out] v DynamicVector to be read
+ *
+ * \returns the input stream (in)
+ */
+ template< class K, class Allocator >
+ inline std::istream &operator>> ( std::istream &in,
+ DynamicVector< K, Allocator > &v )
+ {
+ DynamicVector< K, Allocator > w(v);
+ for( typename DynamicVector< K, Allocator >::size_type i = 0; i < w.size(); ++i )
+ in >> w[ i ];
+ if(in)
+ v = std::move(w);
+ return in;
+ }
+
+ /** @} end documentation */
+
+} // end namespace
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_ENUMSET_HH
+#define DUNE_ENUMSET_HH
+
+#include <iostream>
+
+namespace Dune
+{
+ /**
+ * @file
+ * @brief Classes for building sets out of enumeration values.
+ * @author Markus Blatt
+ */
+ /** @addtogroup Common
+ *
+ * @{
+ */
+
+ /**
+ * @brief An empty set.
+ */
+ template<typename TA>
+ class EmptySet
+ {
+ public:
+ /**
+ * @brief The POD type the set holds.
+ */
+ typedef TA Type;
+ /**
+ * @brief Always returns false.
+ */
+ static bool contains(const Type& attribute);
+ };
+
+ /**
+ * @brief A set containing everything.
+ */
+ template<typename TA>
+ class AllSet
+ {
+ public:
+ /**
+ * @brief The POD type the set holds.
+ */
+ typedef TA Type;
+ /**
+ * @brief Always returns true.
+ */
+ static bool contains(const Type& attribute);
+ };
+
+ /**
+ * @brief A set consisting only of one item.
+ */
+ template<typename TA, int item>
+ class EnumItem
+ {
+ public:
+ /**
+ * @brief The type the set holds.
+ */
+ typedef TA Type;
+
+ /**
+ * @brief Tests whether an item is in the set.
+ * @return True if the attribute equals the template parameter \c item.
+ */
+ static bool contains(const Type& attribute);
+ };
+
+ /**
+ * @brief A set representing a range including the borders.
+ */
+ template<typename TA,int from, int end>
+ class EnumRange //: public PODSet<EnumRange<T,from,end>,T>
+ {
+ public:
+ /**
+ * @brief The type the set holds.
+ */
+ typedef TA Type;
+ static bool contains(const Type& item);
+ };
+
+ /**
+ * @brief The negation of a set.
+ * An item is contained in the set if and only if it is not
+ * contained in the negated set.
+ */
+ template<typename S>
+ class NegateSet
+ {
+ public:
+ typedef typename S::Type Type;
+
+ static bool contains(const Type& item)
+ {
+ return !S::contains(item);
+ }
+ };
+
+ /**
+ * @brief A set combining two other sets.
+ */
+ template<class TI1, class TI2, typename TA=typename TI1::Type>
+ class Combine
+ {
+ public:
+ static bool contains(const TA& item);
+ };
+
+ template<typename TA>
+ inline bool EmptySet<TA>::contains([[maybe_unused]] const Type& attribute)
+ {
+ return false;
+ }
+
+ template<typename TA>
+ inline bool AllSet<TA>::contains([[maybe_unused]] const Type& attribute)
+ {
+ return true;
+ }
+
+ template<typename TA,int i>
+ inline bool EnumItem<TA,i>::contains(const Type& item)
+ {
+ return item==i;
+ }
+
+ template<typename TA,int i>
+ inline std::ostream& operator<<(std::ostream& os, const EnumItem<TA,i>&)
+ {
+ return os<<i;
+ }
+
+ template<typename TA, int from, int to>
+ inline bool EnumRange<TA,from,to>::contains(const Type& item)
+ {
+ return from<=item && item<=to;
+ }
+
+ template<typename TA, int from, int to>
+ inline std::ostream& operator<<(std::ostream& os, const EnumRange<TA,from,to>&)
+ {
+ return os<<"["<<from<<" - "<<to<<"]";
+ }
+
+ template<class TI1, class TI2, typename TA>
+ inline bool Combine<TI1,TI2,TA>::contains(const TA& item)
+ {
+ return TI1::contains(item) ||
+ TI2::contains(item);
+ }
+
+ template<class TI1, class TI2>
+ inline Combine<TI1,TI2,typename TI1::Type> combine([[maybe_unused]] const TI1& set1,
+ [[maybe_unused]] const TI2& set2)
+ {
+ return Combine<TI1,TI2,typename TI1::Type>();
+ }
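+
+ // Usage sketch (illustrative; the enum and its values are made up for this example):
+ //
+ //   enum Attribute { owner = 0, overlap = 1, copy = 2 };
+ //   using OwnerOrOverlap = Dune::Combine<Dune::EnumItem<Attribute, owner>,
+ //                                        Dune::EnumItem<Attribute, overlap>, Attribute>;
+ //   bool in = OwnerOrOverlap::contains(copy);                                // false
+ //   bool r  = Dune::EnumRange<Attribute, owner, overlap>::contains(overlap); // true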
+
+ template<class TI1, class TI2, class T>
+ inline std::ostream& operator<<(std::ostream& os, const Combine<TI1,TI2,T>&)
+ {
+ return os << TI1()<<" "<<TI2();
+ }
+ /** @} */
+}
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include <dune/common/exceptions.hh>
+
+namespace Dune {
+ /*
+ static member of Dune::Exception
+ */
+ ExceptionHook * Exception::_hook = 0;
+
+ /*
+ Implementation of Dune::Exception
+ */
+ Exception::Exception ()
+ {
+ // call the hook if necessary
+ if (_hook != 0) _hook->operator()();
+ }
+
+ void Exception::registerHook (ExceptionHook * hook)
+ {
+ _hook = hook;
+ }
+
+ void Exception::clearHook ()
+ {
+ _hook = 0;
+ }
+
+ void Exception::message(const std::string & msg)
+ {
+ _message = msg;
+ }
+
+ const char* Exception::what() const noexcept
+ {
+ return _message.data();
+ }
+
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_EXCEPTIONS_HH
+#define DUNE_EXCEPTIONS_HH
+
+#include <exception>
+#include <string>
+#include <sstream>
+
+namespace Dune {
+
+ /*! \defgroup Exceptions Exception handling
+ \ingroup Common
+ \{
+
+ The Dune-exceptions are designed to allow a simple derivation of subclasses
+ and to accept a text written in the '<<' syntax.
+
+ Example of usage:
+
+ \code
+ #include <dune/common/exceptions.hh>
+
+ ...
+
+ class FileNotFoundError : public Dune::IOError {};
+
+ ...
+
+ void fileopen (std::string name) {
+ std::ifstream file;
+
+ file.open(name.c_str());
+
+ if (!file)
+ DUNE_THROW(FileNotFoundError, "File " << name << " not found!");
+
+ ...
+
+ file.close();
+ }
+
+ ...
+
+ int main () {
+ try {
+ ...
+ } catch (Dune::IOError &e) {
+ std::cerr << "I/O error: " << e << std::endl;
+ return 1;
+ } catch (Dune::Exception &e) {
+ std::cerr << "Generic Dune error: " << e << std::endl;
+ return 2;
+ }
+ }
+ \endcode
+
+ \see exceptions.hh for detailed info
+
+ */
+
+ /*! \file
+ \brief A few common exception classes
+
+ This file defines a common framework for generating exception
+ subclasses and to throw them in a simple manner
+
+ */
+
+ /* forward declarations */
+ class Exception;
+ struct ExceptionHook;
+
+ /*! \class Exception
+ \brief Base class for Dune-Exceptions
+
+ all Dune exceptions are derived from this class via trivial subclassing:
+
+ \code
+ class MyException : public Dune::Exception {};
+ \endcode
+
+ You should not \c throw a Dune::Exception directly but use the macro
+ DUNE_THROW() instead, which fills the message buffer of the exception
+ in a standard way and lets you compose the message in operator<< style.
+
+ \see DUNE_THROW, IOError, MathError
+
+ */
+ class Exception
+ : public std::exception
+ {
+ public:
+ Exception ();
+ void message(const std::string &msg); //!< store string in internal message buffer
+ const char* what() const noexcept override; //!< output internal message buffer
+ static void registerHook (ExceptionHook * hook); //!< add a functor which is called before a Dune::Exception is emitted (see Dune::ExceptionHook) \see Dune::ExceptionHook
+ static void clearHook (); //!< remove all hooks
+ private:
+ std::string _message;
+ static ExceptionHook * _hook;
+ };
+
+ /*! \brief Base class to add a hook to the Dune::Exception
+
+ The user can add a functor which should be called before a Dune::Exception is emitted.
+
+
+ Example: attach a debugger to the process, if an exception is thrown
+ \code
+ struct ExceptionHookDebugger : public Dune::ExceptionHook
+ {
+ char * process_;
+ char * debugger_;
+ ExceptionHookDebugger (int argc, char ** argv, std::string debugger)
+ {
+ process_ = strdup(argv[0]);
+ debugger_ = strdup(debugger.c_str());
+ }
+ virtual void operator () ()
+ {
+ pid_t pid = getpid();
+ pid_t cpid;
+ cpid = fork();
+ if (cpid == 0) // child
+ {
+ char * argv[4];
+ argv[0] = debugger_;
+ argv[1] = process_;
+ argv[2] = new char[12];
+ snprintf(argv[2], 12, "%i", int(pid));
+ argv[3] = 0;
+ // execute debugger
+ std::cout << process_ << "\n";
+ std::cout << argv[0] << " "
+ << argv[1] << " "
+ << argv[2] << std::endl;
+ execv(argv[0], argv);
+ }
+ else // parent
+ {
+ // send application to sleep
+ kill(pid, SIGSTOP);
+ }
+ }
+ };
+ \endcode
+
+ This hook is registered via a static method of Dune::Exception:
+ \code
+ int main(int argc, char** argv) {
+ Dune::MPIHelper & mpihelper = Dune::MPIHelper::instance(argc,argv);
+ ExceptionHookDebugger debugger(argc, argv, "/usr/bin/ddd");
+ Dune::Exception::registerHook(& debugger);
+ try
+ {
+ ...
+ }
+ catch (std::string & s) {
+ std::cout << mpihelper.rank() << ": ERROR: " << s << std::endl;
+ }
+ catch (Dune::Exception & e) {
+ std::cout << mpihelper.rank() << ": DUNE ERROR: " << e.what() << std::endl;
+ }
+ }
+ \endcode
+
+ */
+ struct ExceptionHook
+ {
+ virtual ~ExceptionHook() {}
+ virtual void operator () () = 0;
+ };
+
+ inline std::ostream& operator<<(std::ostream &stream, const Exception &e)
+ {
+ return stream << e.what();
+ }
+
+#ifndef DOXYGEN
+ // the "format" the exception-type gets printed. __FILE__ and
+ // __LINE__ are standard C-defines, the GNU cpp-infofile claims that
+ // C99 defines __func__ as well. __FUNCTION__ is a GNU-extension
+#define THROWSPEC(E) # E << " [" << __func__ << ":" << __FILE__ << ":" << __LINE__ << "]: "
+#endif // DOXYGEN
+
+ /*! Macro to throw an exception
+
+ \code
+ #include <dune/common/exceptions.hh>
+ \endcode
+
+ \param E exception class derived from Dune::Exception
+ \param m reason for this exception in ostream-notation
+
+ Example:
+
+ \code
+ if (filehandle == 0)
+ DUNE_THROW(FileError, "Could not open " << filename << " for reading!");
+ \endcode
+
+ DUNE_THROW automatically adds information about the exception thrown
+ to the text.
+
+ \note
+ you can add a hook to be called before a Dune::Exception is emitted,
+ e.g. to add additional information to the exception,
+ or to invoke a debugger during parallel debugging. (see Dune::ExceptionHook)
+
+ */
+ // this is the magic: use the usual do { ... } while (0) trick, create
+ // the full message via a string stream and throw the created object
+#define DUNE_THROW(E, m) do { E th__ex; std::ostringstream th__out; \
+ th__out << THROWSPEC(E) << m; th__ex.message(th__out.str()); throw th__ex; \
+} while (0)
+
+ /*! \brief Default exception class for I/O errors
+
+ This is a superclass for any errors dealing with file/socket I/O problems
+ like
+
+ - file not found
+ - could not write file
+ - could not connect to remote socket
+ */
+ class IOError : public Exception {};
+
+ /*! \brief Default exception class for mathematical errors
+
+ This is the superclass for all errors which are caused by
+ mathematical problems like
+
+ - matrix not invertible
+ - not convergent
+ */
+ class MathError : public Exception {};
+
+ /*! \brief Default exception class for range errors
+
+ This is the superclass for all errors which are caused because
+ the user tries to access data that was not allocated before.
+ These can be problems like
+
+ - accessing array entries behind the last entry
+ - adding the fourth non zero entry in a sparse matrix
+ with only three non zero entries per row
+
+ */
+ class RangeError : public Exception {};
+
+ /*! \brief Default exception for dummy implementations
+
+ This exception can be used for functions/methods
+
+ - that have to be implemented but should never be called
+ - that are missing
+ */
+ class NotImplemented : public Exception {};
+
+ /*! \brief Default exception class for OS errors
+
+ This exception is thrown when a system call returns an error.
+
+ */
+ class SystemError : public Exception {};
+
+ /*! \brief Default exception if memory allocation fails
+
+ */
+ class OutOfMemoryError : public SystemError {};
+
+ /*! \brief Default exception if a function was called while
+ the object is not in a valid state for that function.
+ */
+ class InvalidStateException : public Exception {};
+
+ /*! \brief Default exception if an error in the parallel
+ communication of the program occurred
+ \ingroup ParallelCommunication
+ */
+ class ParallelError : public Exception {};
+
+} // end namespace
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_COMMON_FILLED_ARRAY_HH
+#define DUNE_COMMON_FILLED_ARRAY_HH
+
+/** \file
+ \brief Utility to generate an array with a certain value
+ */
+
+#include <array>
+#include <cstddef>
+
+namespace Dune
+{
+ /** @addtogroup Common
+
+ @{
+ */
+
+ //! Return an array filled with the provided value.
+ /**
+ * \note This function is `constexpr` only in C++17, or, more precisely,
+ * when `std::array::begin()` and `std::array::end()` are `constexpr`.
+ *
+ * \tparam n Size of the returned array.
+ * \tparam T Value type of the returned array. This is usually deduced
+ * from `t`.
+ */
+ template<std::size_t n, class T>
+ constexpr std::array<T, n> filledArray(const T& t)
+ {
+ std::array<T, n> arr{};
+ // this is constexpr in c++17, `arr.fill(t)` is not
+ for(auto &el : arr)
+ el = t;
+ return arr;
+ }
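+
+ // Usage sketch (illustrative, not part of the original interface):
+ //
+ //   auto a = Dune::filledArray<3>(42);   // std::array<int, 3> holding {42, 42, 42}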
+
+ /** @} */
+
+} // end namespace Dune
+
+#endif // DUNE_COMMON_FILLED_ARRAY_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include "float_cmp.hh"
+
+#include <vector>
+#include <limits>
+#include <algorithm>
+#include <cstdlib>
+#include <dune/common/fvector.hh>
+
+namespace Dune {
+
+
+ namespace FloatCmp {
+ // traits
+ //! Mapping of value type to epsilon type
+ /**
+ * @ingroup FloatCmp
+ * @tparam T The value type
+ */
+ template<class T> struct EpsilonType {
+ //! The epsilon type corresponding to value type T
+ typedef T Type;
+ };
+ //! Specialization of EpsilonType for std::vector
+ /**
+ * @ingroup FloatCmp
+ * @tparam T The value_type of the std::vector
+ * @tparam A The Allocator of the std::vector
+ */
+ template<class T, typename A>
+ struct EpsilonType<std::vector<T, A> > {
+ //! The epsilon type corresponding to value type std::vector<T, A>
+ typedef typename EpsilonType<T>::Type Type;
+ };
+ //! Specialization of EpsilonType for Dune::FieldVector
+ /**
+ * @ingroup FloatCmp
+ * @tparam T The field_type of the Dune::FieldVector
+ * @tparam n The size of the Dune::FieldVector
+ */
+ template<class T, int n>
+ struct EpsilonType<FieldVector<T, n> > {
+ //! The epsilon type corresponding to value type Dune::FieldVector<T, n>
+ typedef typename EpsilonType<T>::Type Type;
+ };
+
+ // default epsilon
+ template<class T>
+ struct DefaultEpsilon<T, relativeWeak> {
+ static typename EpsilonType<T>::Type value()
+ { return std::numeric_limits<typename EpsilonType<T>::Type>::epsilon()*8.; }
+ };
+ template<class T>
+ struct DefaultEpsilon<T, relativeStrong> {
+ static typename EpsilonType<T>::Type value()
+ { return std::numeric_limits<typename EpsilonType<T>::Type>::epsilon()*8.; }
+ };
+ template<class T>
+ struct DefaultEpsilon<T, absolute> {
+ static typename EpsilonType<T>::Type value()
+ { return std::max<typename EpsilonType<T>::Type>(std::numeric_limits<typename EpsilonType<T>::Type>::epsilon(), 1e-6); }
+ };
+
+ namespace Impl {
+ // basic comparison
+ template<class T, CmpStyle style = defaultCmpStyle>
+ struct eq_t;
+ template<class T>
+ struct eq_t<T, relativeWeak> {
+ static bool eq(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T>::value())
+ {
+ using std::abs;
+ return abs(first - second) <= epsilon*std::max(abs(first), abs(second));
+ }
+ };
+ template<class T>
+ struct eq_t<T, relativeStrong> {
+ static bool eq(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T>::value())
+ {
+ using std::abs;
+ return abs(first - second) <= epsilon*std::min(abs(first), abs(second));
+ }
+ };
+ template<class T>
+ struct eq_t<T, absolute> {
+ static bool eq(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T>::value())
+ {
+ using std::abs;
+ return abs(first-second) <= epsilon;
+ }
+ };
+ template<class T, CmpStyle cstyle>
+ struct eq_t_std_vec {
+ typedef std::vector<T> V;
+ static bool eq(const V &first,
+ const V &second,
+ typename EpsilonType<V>::Type epsilon = DefaultEpsilon<V>::value()) {
+ auto size = first.size();
+ if(size != second.size()) return false;
+ for(unsigned int i = 0; i < size; ++i)
+ if(!eq_t<T, cstyle>::eq(first[i], second[i], epsilon))
+ return false;
+ return true;
+ }
+ };
+ template< class T>
+ struct eq_t<std::vector<T>, relativeWeak> : eq_t_std_vec<T, relativeWeak> {};
+ template< class T>
+ struct eq_t<std::vector<T>, relativeStrong> : eq_t_std_vec<T, relativeStrong> {};
+ template< class T>
+ struct eq_t<std::vector<T>, absolute> : eq_t_std_vec<T, absolute> {};
+
+ template<class T, int n, CmpStyle cstyle>
+ struct eq_t_fvec {
+ typedef Dune::FieldVector<T, n> V;
+ static bool eq(const V &first,
+ const V &second,
+ typename EpsilonType<V>::Type epsilon = DefaultEpsilon<V>::value()) {
+ for(int i = 0; i < n; ++i)
+ if(!eq_t<T, cstyle>::eq(first[i], second[i], epsilon))
+ return false;
+ return true;
+ }
+ };
+ template< class T, int n >
+ struct eq_t< Dune::FieldVector<T, n>, relativeWeak> : eq_t_fvec<T, n, relativeWeak> {};
+ template< class T, int n >
+ struct eq_t< Dune::FieldVector<T, n>, relativeStrong> : eq_t_fvec<T, n, relativeStrong> {};
+ template< class T, int n >
+ struct eq_t< Dune::FieldVector<T, n>, absolute> : eq_t_fvec<T, n, absolute> {};
+ } // namespace Impl
+
+ // operations in functional style
+ template <class T, CmpStyle style>
+ bool eq(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon)
+ {
+ return Impl::eq_t<T, style>::eq(first, second, epsilon);
+ }
+ template <class T, CmpStyle style>
+ bool ne(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon)
+ {
+ return !eq<T, style>(first, second, epsilon);
+ }
+ template <class T, CmpStyle style>
+ bool gt(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon)
+ {
+ return first > second && ne<T, style>(first, second, epsilon);
+ }
+ template <class T, CmpStyle style>
+ bool lt(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon)
+ {
+ return first < second && ne<T, style>(first, second, epsilon);
+ }
+ template <class T, CmpStyle style>
+ bool ge(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon)
+ {
+ return first > second || eq<T, style>(first, second, epsilon);
+ }
+ template <class T, CmpStyle style>
+ bool le(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon)
+ {
+ return first < second || eq<T, style>(first, second, epsilon);
+ }
+
+ // default template arguments
+ template <class T>
+ bool eq(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, defaultCmpStyle>::value())
+ {
+ return eq<T, defaultCmpStyle>(first, second, epsilon);
+ }
+ template <class T>
+ bool ne(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, defaultCmpStyle>::value())
+ {
+ return ne<T, defaultCmpStyle>(first, second, epsilon);
+ }
+ template <class T>
+ bool gt(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, defaultCmpStyle>::value())
+ {
+ return gt<T, defaultCmpStyle>(first, second, epsilon);
+ }
+ template <class T>
+ bool lt(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, defaultCmpStyle>::value())
+ {
+ return lt<T, defaultCmpStyle>(first, second, epsilon);
+ }
+ template <class T>
+ bool ge(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, defaultCmpStyle>::value())
+ {
+ return ge<T, defaultCmpStyle>(first, second, epsilon);
+ }
+ template <class T>
+ bool le(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, defaultCmpStyle>::value())
+ {
+ return le<T, defaultCmpStyle>(first, second, epsilon);
+ }
+
+ // rounding operations
+ namespace Impl {
+ template<class I, class T, CmpStyle cstyle = defaultCmpStyle, RoundingStyle rstyle = defaultRoundingStyle>
+ struct round_t;
+ template<class I, class T, CmpStyle cstyle>
+ struct round_t<I, T, cstyle, downward> {
+ static I
+ round(const T &val,
+ typename EpsilonType<T>::Type epsilon = (DefaultEpsilon<T, cstyle>::value())) {
+ // first get an approximation
+ I lower = I(val);
+ I upper;
+ if(eq<T, cstyle>(T(lower), val, epsilon)) return lower;
+ if(T(lower) > val) { upper = lower; lower--; }
+ else upper = lower+1;
+ if(le<T, cstyle>(val - T(lower), T(upper) - val, epsilon))
+ return lower;
+ else return upper;
+ }
+ };
+ template<class I, class T, CmpStyle cstyle>
+ struct round_t<I, T, cstyle, upward> {
+ static I
+ round(const T &val,
+ typename EpsilonType<T>::Type epsilon = (DefaultEpsilon<T, cstyle>::value())) {
+ // first get an approximation
+ I lower = I(val);
+ I upper;
+ if(eq<T, cstyle>(T(lower), val, epsilon)) return lower;
+ if(T(lower) > val) { upper = lower; lower--; }
+ else upper = lower+1;
+ if(lt<T, cstyle>(val - T(lower), T(upper) - val, epsilon))
+ return lower;
+ else return upper;
+ }
+ };
+ template<class I, class T, CmpStyle cstyle>
+ struct round_t<I, T, cstyle, towardZero> {
+ static I
+ round(const T &val,
+ typename EpsilonType<T>::Type epsilon = (DefaultEpsilon<T, cstyle>::value())) {
+ if(val > T(0))
+ return round_t<I, T, cstyle, downward>::round(val, epsilon);
+ else return round_t<I, T, cstyle, upward>::round(val, epsilon);
+ }
+ };
+ template<class I, class T, CmpStyle cstyle>
+ struct round_t<I, T, cstyle, towardInf> {
+ static I
+ round(const T &val,
+ typename EpsilonType<T>::Type epsilon = (DefaultEpsilon<T, cstyle>::value())) {
+ if(val > T(0))
+ return round_t<I, T, cstyle, upward>::round(val, epsilon);
+ else return round_t<I, T, cstyle, downward>::round(val, epsilon);
+ }
+ };
+ template<class I, class T, CmpStyle cstyle, RoundingStyle rstyle>
+ struct round_t<std::vector<I>, std::vector<T>, cstyle, rstyle> {
+ static std::vector<I>
+ round(const T &val,
+ typename EpsilonType<T>::Type epsilon = (DefaultEpsilon<T, cstyle>::value())) {
+ unsigned int size = val.size();
+ std::vector<I> res(size);
+ for(unsigned int i = 0; i < size; ++i)
+ res[i] = round_t<I, T, cstyle, rstyle>::round(val[i], epsilon);
+ return res;
+ }
+ };
+ template<class I, class T, int n, CmpStyle cstyle, RoundingStyle rstyle>
+ struct round_t<Dune::FieldVector<I, n>, Dune::FieldVector<T, n>, cstyle, rstyle> {
+ static Dune::FieldVector<I, n>
+ round(const T &val,
+ typename EpsilonType<T>::Type epsilon = (DefaultEpsilon<T, cstyle>::value())) {
+ Dune::FieldVector<I, n> res;
+ for(int i = 0; i < n; ++i)
+ res[i] = round_t<I, T, cstyle, rstyle>::round(val[i], epsilon);
+ return res;
+ }
+ };
+ } // end namespace Impl
+ template<class I, class T, CmpStyle cstyle, RoundingStyle rstyle>
+ I round(const T &val, typename EpsilonType<T>::Type epsilon /*= DefaultEpsilon<T, cstyle>::value()*/)
+ {
+ return Impl::round_t<I, T, cstyle, rstyle>::round(val, epsilon);
+ }
+ template<class I, class T, CmpStyle cstyle>
+ I round(const T &val, typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, cstyle>::value())
+ {
+ return round<I, T, cstyle, defaultRoundingStyle>(val, epsilon);
+ }
+ template<class I, class T, RoundingStyle rstyle>
+ I round(const T &val, typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, defaultCmpStyle>::value())
+ {
+ return round<I, T, defaultCmpStyle, rstyle>(val, epsilon);
+ }
+ template<class I, class T>
+ I round(const T &val, typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, defaultCmpStyle>::value())
+ {
+ return round<I, T, defaultCmpStyle>(val, epsilon);
+ }
+
+ // truncation
+ namespace Impl {
+ template<class I, class T, CmpStyle cstyle = defaultCmpStyle, RoundingStyle rstyle = defaultRoundingStyle>
+ struct trunc_t;
+ template<class I, class T, CmpStyle cstyle>
+ struct trunc_t<I, T, cstyle, downward> {
+ static I
+ trunc(const T &val,
+ typename EpsilonType<T>::Type epsilon = (DefaultEpsilon<T, cstyle>::value())) {
+ // this should be optimized away unless needed
+ if(!std::numeric_limits<I>::is_signed)
+ // make sure this works for all useful cases even if I is an unsigned type
+ if(eq<T, cstyle>(val, T(0), epsilon)) return I(0);
+ // first get an approximation
+ I lower = I(val); // now |val-lower| < 1
+ // make sure we're really lower in case the cast truncated to an unexpected direction
+ if(T(lower) > val) lower--; // now val-lower < 1
+ // check whether lower + 1 is approximately val
+ if(eq<T, cstyle>(T(lower+1), val, epsilon))
+ return lower+1;
+ else return lower;
+ }
+ };
+ template<class I, class T, CmpStyle cstyle>
+ struct trunc_t<I, T, cstyle, upward> {
+ static I
+ trunc(const T &val,
+ typename EpsilonType<T>::Type epsilon = (DefaultEpsilon<T, cstyle>::value())) {
+ I upper = trunc_t<I, T, cstyle, downward>::trunc(val, epsilon);
+ if(ne<T, cstyle>(T(upper), val, epsilon)) ++upper;
+ return upper;
+ }
+ };
+ template<class I, class T, CmpStyle cstyle>
+ struct trunc_t<I, T, cstyle, towardZero> {
+ static I
+ trunc(const T &val,
+ typename EpsilonType<T>::Type epsilon = (DefaultEpsilon<T, cstyle>::value())) {
+ if(val > T(0)) return trunc_t<I, T, cstyle, downward>::trunc(val, epsilon);
+ else return trunc_t<I, T, cstyle, upward>::trunc(val, epsilon);
+ }
+ };
+ template<class I, class T, CmpStyle cstyle>
+ struct trunc_t<I, T, cstyle, towardInf> {
+ static I
+ trunc(const T &val,
+ typename EpsilonType<T>::Type epsilon = (DefaultEpsilon<T, cstyle>::value())) {
+ if(val > T(0)) return trunc_t<I, T, cstyle, upward>::trunc(val, epsilon);
+ else return trunc_t<I, T, cstyle, downward>::trunc(val, epsilon);
+ }
+ };
+ template<class I, class T, CmpStyle cstyle, RoundingStyle rstyle>
+ struct trunc_t<std::vector<I>, std::vector<T>, cstyle, rstyle> {
+ static std::vector<I>
+ trunc(const std::vector<T> &val,
+ typename EpsilonType<T>::Type epsilon = (DefaultEpsilon<T, cstyle>::value())) {
+ unsigned int size = val.size();
+ std::vector<I> res(size);
+ for(unsigned int i = 0; i < size; ++i)
+ res[i] = trunc_t<I, T, cstyle, rstyle>::trunc(val[i], epsilon);
+ return res;
+ }
+ };
+ template<class I, class T, int n, CmpStyle cstyle, RoundingStyle rstyle>
+ struct trunc_t<Dune::FieldVector<I, n>, Dune::FieldVector<T, n>, cstyle, rstyle> {
+ static Dune::FieldVector<I, n>
+ trunc(const Dune::FieldVector<T, n> &val,
+ typename EpsilonType<T>::Type epsilon = (DefaultEpsilon<T, cstyle>::value())) {
+ Dune::FieldVector<I, n> res;
+ for(int i = 0; i < n; ++i)
+ res[i] = trunc_t<I, T, cstyle, rstyle>::trunc(val[i], epsilon);
+ return res;
+ }
+ };
+ } // namespace Impl
+ template<class I, class T, CmpStyle cstyle, RoundingStyle rstyle>
+ I trunc(const T &val, typename EpsilonType<T>::Type epsilon /*= DefaultEpsilon<T, cstyle>::value()*/)
+ {
+ return Impl::trunc_t<I, T, cstyle, rstyle>::trunc(val, epsilon);
+ }
+ template<class I, class T, CmpStyle cstyle>
+ I trunc(const T &val, typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, cstyle>::value())
+ {
+ return trunc<I, T, cstyle, defaultRoundingStyle>(val, epsilon);
+ }
+ template<class I, class T, RoundingStyle rstyle>
+ I trunc(const T &val, typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, defaultCmpStyle>::value())
+ {
+ return trunc<I, T, defaultCmpStyle, rstyle>(val, epsilon);
+ }
+ template<class I, class T>
+ I trunc(const T &val, typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, defaultCmpStyle>::value())
+ {
+ return trunc<I, T, defaultCmpStyle>(val, epsilon);
+ }
+ } //namespace FloatCmp
+
+ // oo interface
+ template<class T, FloatCmp::CmpStyle cstyle_, FloatCmp::RoundingStyle rstyle_>
+ FloatCmpOps<T, cstyle_, rstyle_>::
+ FloatCmpOps(EpsilonType epsilon) : epsilon_(epsilon) {}
+
+
+ template<class T, FloatCmp::CmpStyle cstyle_, FloatCmp::RoundingStyle rstyle_>
+ typename FloatCmpOps<T, cstyle_, rstyle_>::EpsilonType
+ FloatCmpOps<T, cstyle_, rstyle_>::epsilon() const
+ {
+ return epsilon_;
+ }
+
+ template<class T, FloatCmp::CmpStyle cstyle_, FloatCmp::RoundingStyle rstyle_>
+ void
+ FloatCmpOps<T, cstyle_, rstyle_>::epsilon(EpsilonType epsilon__)
+ {
+ epsilon_ = epsilon__;
+ }
+
+
+ template<class T, FloatCmp::CmpStyle cstyle_, FloatCmp::RoundingStyle rstyle_>
+ bool FloatCmpOps<T, cstyle_, rstyle_>::
+ eq(const ValueType &first, const ValueType &second) const
+ {
+ return Dune::FloatCmp::eq<ValueType, cstyle>(first, second, epsilon_);
+ }
+
+ template<class T, FloatCmp::CmpStyle cstyle_, FloatCmp::RoundingStyle rstyle_>
+ bool FloatCmpOps<T, cstyle_, rstyle_>::
+ ne(const ValueType &first, const ValueType &second) const
+ {
+ return Dune::FloatCmp::ne<ValueType, cstyle>(first, second, epsilon_);
+ }
+
+ template<class T, FloatCmp::CmpStyle cstyle_, FloatCmp::RoundingStyle rstyle_>
+ bool FloatCmpOps<T, cstyle_, rstyle_>::
+ gt(const ValueType &first, const ValueType &second) const
+ {
+ return Dune::FloatCmp::gt<ValueType, cstyle>(first, second, epsilon_);
+ }
+
+ template<class T, FloatCmp::CmpStyle cstyle_, FloatCmp::RoundingStyle rstyle_>
+ bool FloatCmpOps<T, cstyle_, rstyle_>::
+ lt(const ValueType &first, const ValueType &second) const
+ {
+ return Dune::FloatCmp::lt<ValueType, cstyle>(first, second, epsilon_);
+ }
+
+ template<class T, FloatCmp::CmpStyle cstyle_, FloatCmp::RoundingStyle rstyle_>
+ bool FloatCmpOps<T, cstyle_, rstyle_>::
+ ge(const ValueType &first, const ValueType &second) const
+ {
+ return Dune::FloatCmp::ge<ValueType, cstyle>(first, second, epsilon_);
+ }
+
+ template<class T, FloatCmp::CmpStyle cstyle_, FloatCmp::RoundingStyle rstyle_>
+ bool FloatCmpOps<T, cstyle_, rstyle_>::
+ le(const ValueType &first, const ValueType &second) const
+ {
+ return Dune::FloatCmp::le<ValueType, cstyle>(first, second, epsilon_);
+ }
+
+
+ template<class T, FloatCmp::CmpStyle cstyle_, FloatCmp::RoundingStyle rstyle_>
+ template<class I>
+ I FloatCmpOps<T, cstyle_, rstyle_>::
+ round(const ValueType &val) const
+ {
+ return Dune::FloatCmp::round<I, ValueType, cstyle, rstyle_>(val, epsilon_);
+ }
+
+ template<class T, FloatCmp::CmpStyle cstyle_, FloatCmp::RoundingStyle rstyle_>
+ template<class I>
+ I FloatCmpOps<T, cstyle_, rstyle_>::
+ trunc(const ValueType &val) const
+ {
+ return Dune::FloatCmp::trunc<I, ValueType, cstyle, rstyle_>(val, epsilon_);
+ }
+
+} //namespace Dune
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_FLOAT_CMP_HH
+#define DUNE_COMMON_FLOAT_CMP_HH
+
+/** \file
+ * \brief Various ways to compare floating-point numbers
+ */
+
+/**
+ @addtogroup FloatCmp
+
+ @section How_to_compare How to compare floats
+
+ When comparing floating point numbers for equality, one often faces the
+ problem that floating point operations are not always exact. For example on
+ i386 the expression
+ @code
+ 0.2 + 0.2 + 0.2 + 0.2 + 0.2 + 0.2 + 0.2 + 0.2 + 0.2 + 0.2 == 2.0
+ @endcode
+ evaluates to
+ @code
+ 1.99999999999999977796 == 2.00000000000000000000
+ @endcode
+ which is false. One solution is to compare approximately, using an epsilon
+ which says how much deviation to accept.
+
+ The most straightforward way of comparing is using an @em absolute epsilon.
+ This means comparison for equality is replaced by
+ @code
+ abs(first-second) <= epsilon
+ @endcode
+ This has a severe disadvantage: if you have an epsilon like 1e-10 but first
+ and second are of the magnitude 1e-15 everything will compare equal which is
+ certainly not what you want. This can be overcome by selecting an
+ appropriate epsilon. Nevertheless this method of comparing is not
+ recommended in general, and we will present a more robust method in the
+ next paragraph.
+
+ There is another way of comparing approximately, using a @em relative
+ epsilon which is then scaled with first:
+ @code
+ abs(first-second) <= epsilon * abs(first)
+ @endcode
+ Of course the comparison should be symmetric in first and second, so we
+ cannot arbitrarily select either first or second to scale epsilon. There are
+ two symmetric variants, @em relative_weak
+ @code
+ abs(first-second) <= epsilon * max(abs(first), abs(second))
+ @endcode
+ and @em relative_strong
+ @code
+ abs(first-second) <= epsilon * min(abs(first), abs(second))
+ @endcode
+ Both variants are good, but in practice the relative_weak variant is
+ preferred. This is also the default variant.
+
+ \note Although using a relative epsilon is better than using an absolute
+ epsilon, using a relative epsilon leads to problems if either first or
+ second equals 0. In principle the relative method can be combined
+ with an absolute method using an epsilon near the minimum
+ representable positive value, but this is not implemented here.
+
+ There is a completely different way of comparing floats. Instead of giving
+ an epsilon, the programmer states how many representable values are allowed
+ between first and second. See the "Comparing using integers" section in
+ http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
+ for more about that.
+
+ @section Interface Interface
+
+ To do the comparison, you can use the free functions @link
+ Dune::FloatCmp::eq eq()@endlink, @link Dune::FloatCmp::ne ne()@endlink,
+ @link Dune::FloatCmp::gt gt()@endlink, @link Dune::FloatCmp::lt
+ lt()@endlink, @link Dune::FloatCmp::ge ge()@endlink and @link
+ Dune::FloatCmp::le le()@endlink from the namespace Dune::FloatCmp. They
+ take the values to compare and optionally an epsilon, which defaults to 8
+ times the machine epsilon (the difference between 1.0 and the smallest
+ representable value > 1.0) for relative comparisons, or simply 1e-6 for
+ absolute comparisons. The compare style can be given as an optional second
+ template parameter and defaults to relative_weak.
+
+ You can also use the class Dune::FloatCmpOps which has @link
+ Dune::FloatCmpOps::eq eq()@endlink, @link Dune::FloatCmpOps::ne
+ ne()@endlink, @link Dune::FloatCmpOps::gt gt()@endlink, @link
+ Dune::FloatCmpOps::lt lt()@endlink, @link Dune::FloatCmpOps::ge ge()@endlink
+ and @link Dune::FloatCmpOps::le le()@endlink as member functions. In this
+ case the class encapsulates the epsilon and the comparison style (again the
+ defaults from the previous paragraph apply). This may be more convenient if
+ you write your own class utilizing floating point comparisons, and you want
+ the user of your class to specify epsilon and compare style.
+ */
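+
+ // Usage sketch of the free functions (illustrative, not part of the original interface):
+ //
+ //   double a = 0.2 + 0.2 + 0.2 + 0.2 + 0.2;
+ //   bool w = Dune::FloatCmp::eq(a, 1.0);                                          // relativeWeak, default epsilon
+ //   bool s = Dune::FloatCmp::eq<double, Dune::FloatCmp::absolute>(a, 1.0, 1e-12);  // absolute, explicit epsilon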
+
+//! Dune namespace
+namespace Dune {
+ //! FloatCmp namespace
+ //! @ingroup FloatCmp
+ namespace FloatCmp {
+ // basic constants
+ //! How to compare
+ //! @ingroup FloatCmp
+ enum CmpStyle {
+ //! |a-b|/|a| <= epsilon || |a-b|/|b| <= epsilon
+ relativeWeak,
+ //! |a-b|/|a| <= epsilon && |a-b|/|b| <= epsilon
+ relativeStrong,
+ //! |a-b| <= epsilon
+ absolute,
+ //! the global default compare style (relativeWeak)
+ defaultCmpStyle = relativeWeak
+ };
+ //! How to round or truncate
+ //! @ingroup FloatCmp
+ enum RoundingStyle {
+ //! always round toward 0
+ towardZero,
+ //! always round away from 0
+ towardInf,
+ //! round toward \f$-\infty\f$
+ downward,
+ //! round toward \f$+\infty\f$
+ upward,
+ //! the global default rounding style (towardZero)
+ defaultRoundingStyle = towardZero
+ };
+
+ template<class T> struct EpsilonType;
+
+ //! mapping from a value type and a compare style to a default epsilon
+ /**
+ * @ingroup FloatCmp
+ * @tparam T The value type to map from
+ * @tparam style The compare style to map from
+ */
+ template<class T, CmpStyle style = defaultCmpStyle>
+ struct DefaultEpsilon {
+ //! Returns the default epsilon for the given value type and compare style
+ static typename EpsilonType<T>::Type value();
+ };
+
+ // operations in functional style
+
+ //! @addtogroup FloatCmp
+ //! @{
+
+ //! test for equality using epsilon
+ /**
+ * @tparam T Type of the values to compare
+ * @tparam style How to compare. This defaults to defaultCmpStyle.
+ * @param first left operand of equals operation
+ * @param second right operand of equals operation
+ * @param epsilon The epsilon to use in the comparison
+ */
+ template <class T, CmpStyle style /*= defaultCmpStyle*/>
+ bool eq(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, style>::value());
+ //! test for inequality using epsilon
+ /**
+ * @tparam T Type of the values to compare
+ * @tparam style How to compare. This defaults to defaultCmpStyle.
+ * @param first left operand of not-equal operation
+ * @param second right operand of not-equal operation
+ * @param epsilon The epsilon to use in the comparison
+ * @return !eq(first, second, epsilon)
+ */
+ template <class T, CmpStyle style /*= defaultCmpStyle*/>
+ bool ne(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, style>::value());
+ //! test if first greater than second
+ /**
+ * @tparam T Type of the values to compare
+ * @tparam style How to compare. This defaults to defaultCmpStyle.
+ * @param first left operand of greater-than operation
+ * @param second right operand of greater-than operation
+ * @param epsilon The epsilon to use in the comparison
+ * @return ne(first, second, epsilon) && first > second
+ *
+ * this is like first > second but the region that compares equal with an
+ * epsilon is excluded
+ */
+ template <class T, CmpStyle style /*= defaultCmpStyle*/>
+ bool gt(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, style>::value());
+ //! test if first lesser than second
+ /**
+ * @tparam T Type of the values to compare
+ * @tparam style How to compare. This defaults to defaultCmpStyle.
+ * @param first left operand of less-than operation
+ * @param second right operand of less-than operation
+ * @param epsilon The epsilon to use in the comparison
+ * @return ne(first, second, epsilon) && first < second
+ *
+ * this is like first < second, but the region that compares equal with an
+ * epsilon is excluded
+ */
+ template <class T, CmpStyle style /*= defaultCmpStyle*/>
+ bool lt(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, style>::value());
+ //! test if first greater or equal second
+ /**
+ * @tparam T Type of the values to compare
+ * @tparam style How to compare. This defaults to defaultCmpStyle.
+ * @param first left operand of greater-or-equals operation
+ * @param second right operand of greater-or-equals operation
+ * @param epsilon The epsilon to use in the comparison
+ * @return eq(first, second, epsilon) || first > second
+ *
+ * this is like first > second, but the region that compares equal with an
+ * epsilon is also included
+ */
+ template <class T, CmpStyle style /*= defaultCmpStyle*/>
+ bool ge(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, style>::value());
+ //! test if first lesser or equal second
+ /**
+ * @tparam T Type of the values to compare
+ * @tparam style How to compare. This defaults to defaultCmpStyle.
+ * @param first left operand of less-or-equals operation
+ * @param second right operand of less-or-equals operation
+ * @param epsilon The epsilon to use in the comparison
+ * @return eq(first, second) || first < second
+ *
+ * this is like first < second, but the region that compares equal with an
+ * epsilon is also included
+ */
+ template <class T, CmpStyle style /*= defaultCmpStyle*/>
+ bool le(const T &first,
+ const T &second,
+ typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, style>::value());
+
+ // rounding operations
+ //! round using epsilon
+ /**
+ * @tparam I The integral type to round to
+ * @tparam T Type of the value to round
+ * @tparam cstyle How to compare. This defaults to defaultCmpStyle.
+ * @tparam rstyle How to round. This defaults to defaultRoundingStyle.
+ * @param val The value to round
+ * @param epsilon The epsilon to use in comparisons
+ * @return The rounded value
+ *
+ * Round according to rstyle. If val is already near the mean of two
+ * adjacent integers in terms of epsilon, the result will be the rounded
+ * mean.
+ */
+ template<class I, class T, CmpStyle cstyle /*= defaultCmpStyle*/, RoundingStyle rstyle /*= defaultRoundingStyle*/>
+ I round(const T &val, typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, cstyle>::value());
+ // truncation
+ //! truncate using epsilon
+ /**
+ * @tparam I The integral type to truncate to
+ * @tparam T Type of the value to truncate
+ * @tparam cstyle How to compare. This defaults to defaultCmpStyle.
+ * @tparam rstyle How to truncate. This defaults to defaultRoundingStyle.
+ * @param val The value to truncate
+ * @param epsilon The epsilon to use in comparisons
+ * @return The truncated value
+ *
+ * Truncate according to rstyle. If val is already near an integer in
+ * terms of epsilon, the result will be that integer instead of the real
+ * truncated value.
+ */
+ template<class I, class T, CmpStyle cstyle /*= defaultCmpStyle*/, RoundingStyle rstyle /*= defaultRoundingStyle*/>
+ I trunc(const T &val, typename EpsilonType<T>::Type epsilon = DefaultEpsilon<T, cstyle>::value());
+
+ //! @}
+ // group FloatCmp
+ } //namespace FloatCmp
+
+
+ // oo interface
+ //! Class encapsulating a default epsilon
+ /**
+ * @ingroup FloatCmp
+ * @tparam T Type of the values to compare
+ * @tparam cstyle_ How to compare
+ * @tparam rstyle_ How to round
+ */
+ template<class T, FloatCmp::CmpStyle cstyle_ = FloatCmp::defaultCmpStyle,
+ FloatCmp::RoundingStyle rstyle_ = FloatCmp::defaultRoundingStyle>
+ class FloatCmpOps {
+ typedef FloatCmp::CmpStyle CmpStyle;
+ typedef FloatCmp::RoundingStyle RoundingStyle;
+
+ public:
+ // record template parameters
+ //! How comparisons are done
+ static const CmpStyle cstyle = cstyle_;
+ //! How rounding is done
+ static const RoundingStyle rstyle = rstyle_;
+ //! Type of the values to compare
+ typedef T ValueType;
+ //! Type of the epsilon.
+ /**
+ * May be different from the value type, for example for complex<double>
+ */
+ typedef typename FloatCmp::EpsilonType<T>::Type EpsilonType;
+
+ private:
+ EpsilonType epsilon_;
+
+ typedef FloatCmp::DefaultEpsilon<EpsilonType, cstyle> DefaultEpsilon;
+
+ public:
+ //! construct an operations object
+ /**
+ * @param epsilon Use the specified epsilon for comparing
+ */
+ FloatCmpOps(EpsilonType epsilon = DefaultEpsilon::value());
+
+ //! return the current epsilon
+ EpsilonType epsilon() const;
+ //! set new epsilon
+ void epsilon(EpsilonType epsilon__);
+
+ //! test for equality using epsilon
+ bool eq(const ValueType &first, const ValueType &second) const;
+ //! test for inequality using epsilon
+ /**
+ * this is exactly !eq(first, second)
+ */
+ bool ne(const ValueType &first, const ValueType &second) const;
+ //! test if first greater than second
+ /**
+ * this is exactly ne(first, second) && first > second, i.e. greater but
+ * the region that compares equal with an epsilon is excluded
+ */
+ bool gt(const ValueType &first, const ValueType &second) const;
+ //! test if first lesser than second
+ /**
+ * this is exactly ne(first, second) && first < second, i.e. lesser but
+ * the region that compares equal with an epsilon is excluded
+ */
+ bool lt(const ValueType &first, const ValueType &second) const;
+ //! test if first greater or equal second
+ /**
+ * this is exactly eq(first, second) || first > second, i.e. greater but
+ * the region that compares equal with an epsilon is also included
+ */
+ bool ge(const ValueType &first, const ValueType &second) const;
+ //! test if first lesser or equal second
+ /**
+ * this is exactly eq(first, second) || first < second, i.e. lesser but
+ * the region that compares equal with an epsilon is also included
+ */
+ bool le(const ValueType &first, const ValueType &second) const;
+
+ //! round using epsilon
+ /**
+ * @tparam I The integral type to round to
+ *
+ * @param val The value to round
+ *
+ * Round according to rstyle. If val is already near the mean of two
+ * adjacent integers in terms of epsilon, the result will be the rounded
+ * mean.
+ */
+ template<class I>
+ I round(const ValueType &val) const;
+
+ //! truncate using epsilon
+ /**
+ * @tparam I The integral type to truncate to
+ *
+ * @param val The value to truncate
+ *
+ * Truncate according to rstyle. If val is already near an integer in
+ * terms of epsilon, the result will be that integer instead of the real
+ * truncated value.
+ */
+ template<class I>
+ I trunc(const ValueType &val) const;
+
+ };
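+
+ // Usage sketch for the object-oriented interface (illustrative, not part of the
+ // original interface):
+ //
+ //   Dune::FloatCmpOps<double> ops(1e-10);   // relativeWeak comparisons with epsilon 1e-10
+ //   bool equal = ops.eq(1.0, 1.0 + 1e-12);  // true for this epsilon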
+
+} //namespace Dune
+
+#include "float_cmp.cc"
+
+#endif //DUNE_COMMON_FLOAT_CMP_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_FMATRIX_HH
+#define DUNE_FMATRIX_HH
+
+#include <cmath>
+#include <cstddef>
+#include <iostream>
+#include <algorithm>
+#include <initializer_list>
+
+#include <dune/common/boundschecking.hh>
+#include <dune/common/exceptions.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/densematrix.hh>
+#include <dune/common/precision.hh>
+#include <dune/common/promotiontraits.hh>
+#include <dune/common/typetraits.hh>
+
+namespace Dune
+{
+
+ /**
+ @addtogroup DenseMatVec
+ @{
+ */
+
+ /*! \file
+
+ \brief Implements a matrix constructed from a given type
+ representing a field and compile-time given number of rows and columns.
+ */
+
+ template< class K, int ROWS, int COLS = ROWS > class FieldMatrix;
+
+ template< class K, int ROWS, int COLS >
+ struct DenseMatVecTraits< FieldMatrix<K,ROWS,COLS> >
+ {
+ typedef FieldMatrix<K,ROWS,COLS> derived_type;
+
+ // each row is implemented by a field vector
+ typedef FieldVector<K,COLS> row_type;
+
+ typedef row_type &row_reference;
+ typedef const row_type &const_row_reference;
+
+ typedef std::array<row_type,ROWS> container_type;
+ typedef K value_type;
+ typedef typename container_type::size_type size_type;
+ };
+
+ template< class K, int ROWS, int COLS >
+ struct FieldTraits< FieldMatrix<K,ROWS,COLS> >
+ {
+ typedef typename FieldTraits<K>::field_type field_type;
+ typedef typename FieldTraits<K>::real_type real_type;
+ };
+
+ /**
+ @brief A dense n x m matrix.
+
+ Matrices represent linear maps from a vector space V to a vector space W.
+ This class represents such a linear map by storing a two-dimensional
+ %array of numbers of a given field type K. The number of rows and
+ columns is given at compile time.
+ */
+ template<class K, int ROWS, int COLS>
+ class FieldMatrix : public DenseMatrix< FieldMatrix<K,ROWS,COLS> >
+ {
+ std::array< FieldVector<K,COLS>, ROWS > _data;
+ typedef DenseMatrix< FieldMatrix<K,ROWS,COLS> > Base;
+ public:
+
+ //! export size
+ enum {
+ //! The number of rows.
+ rows = ROWS,
+ //! The number of columns.
+ cols = COLS
+ };
+
+ typedef typename Base::size_type size_type;
+ typedef typename Base::row_type row_type;
+
+ typedef typename Base::row_reference row_reference;
+ typedef typename Base::const_row_reference const_row_reference;
+
+ //===== constructors
+ /** \brief Default constructor
+ */
+ constexpr FieldMatrix() = default;
+
+ /** \brief Constructor initializing the matrix from a list of row vectors
+ */
+ FieldMatrix(std::initializer_list<Dune::FieldVector<K, cols> > const &l) {
+ assert(l.size() == rows); // Actually, this is not needed any more!
+ std::copy_n(l.begin(), std::min(static_cast<std::size_t>(ROWS),
+ l.size()),
+ _data.begin());
+ }
+
+ template <class T,
+ typename = std::enable_if_t<HasDenseMatrixAssigner<FieldMatrix, T>::value>>
+ FieldMatrix(T const& rhs)
+ {
+ *this = rhs;
+ }
+
+ using Base::operator=;
+
+ //! copy assignment operator
+ FieldMatrix& operator=(const FieldMatrix&) = default;
+
+ //! copy assignment from FieldMatrix over a different field
+ template<typename T>
+ FieldMatrix& operator=(const FieldMatrix<T, ROWS, COLS>& x)
+ {
+ _data = x._data;
+ return *this;
+ }
+
+ //! no copy assignment from FieldMatrix of different size
+ template <typename T, int rows, int cols>
+ FieldMatrix& operator=(FieldMatrix<T,rows,cols> const&) = delete;
+
+ //! vector space addition -- two-argument version
+ template <class OtherScalar>
+ friend auto operator+ ( const FieldMatrix& matrixA,
+ const FieldMatrix<OtherScalar,ROWS,COLS>& matrixB)
+ {
+ FieldMatrix<typename PromotionTraits<K,OtherScalar>::PromotedType,ROWS,COLS> result;
+
+ for (size_type i = 0; i < ROWS; ++i)
+ for (size_type j = 0; j < COLS; ++j)
+ result[i][j] = matrixA[i][j] + matrixB[i][j];
+
+ return result;
+ }
+
+ //! vector space subtraction -- two-argument version
+ template <class OtherScalar>
+ friend auto operator- ( const FieldMatrix& matrixA,
+ const FieldMatrix<OtherScalar,ROWS,COLS>& matrixB)
+ {
+ FieldMatrix<typename PromotionTraits<K,OtherScalar>::PromotedType,ROWS,COLS> result;
+
+ for (size_type i = 0; i < ROWS; ++i)
+ for (size_type j = 0; j < COLS; ++j)
+ result[i][j] = matrixA[i][j] - matrixB[i][j];
+
+ return result;
+ }
+
+ //! vector space multiplication with scalar
+ template <class Scalar,
+ std::enable_if_t<IsNumber<Scalar>::value, int> = 0>
+ friend auto operator* ( const FieldMatrix& matrix, Scalar scalar)
+ {
+ FieldMatrix<typename PromotionTraits<K,Scalar>::PromotedType,ROWS,COLS> result;
+
+ for (size_type i = 0; i < ROWS; ++i)
+ for (size_type j = 0; j < COLS; ++j)
+ result[i][j] = matrix[i][j] * scalar;
+
+ return result;
+ }
+
+ //! vector space multiplication with scalar
+ template <class Scalar,
+ std::enable_if_t<IsNumber<Scalar>::value, int> = 0>
+ friend auto operator* ( Scalar scalar, const FieldMatrix& matrix)
+ {
+ FieldMatrix<typename PromotionTraits<K,Scalar>::PromotedType,ROWS,COLS> result;
+
+ for (size_type i = 0; i < ROWS; ++i)
+ for (size_type j = 0; j < COLS; ++j)
+ result[i][j] = scalar * matrix[i][j];
+
+ return result;
+ }
+
+ //! vector space division by scalar
+ template <class Scalar,
+ std::enable_if_t<IsNumber<Scalar>::value, int> = 0>
+ friend auto operator/ ( const FieldMatrix& matrix, Scalar scalar)
+ {
+ FieldMatrix<typename PromotionTraits<K,Scalar>::PromotedType,ROWS,COLS> result;
+
+ for (size_type i = 0; i < ROWS; ++i)
+ for (size_type j = 0; j < COLS; ++j)
+ result[i][j] = matrix[i][j] / scalar;
+
+ return result;
+ }
+
+ /** \brief Matrix-matrix multiplication
+ */
+ template <class OtherScalar, int otherCols>
+ friend auto operator* ( const FieldMatrix& matrixA,
+ const FieldMatrix<OtherScalar, COLS, otherCols>& matrixB)
+ {
+ FieldMatrix<typename PromotionTraits<K,OtherScalar>::PromotedType,ROWS,otherCols> result;
+
+ for (size_type i = 0; i < matrixA.mat_rows(); ++i)
+ for (size_type j = 0; j < matrixB.mat_cols(); ++j)
+ {
+ result[i][j] = 0;
+ for (size_type k = 0; k < matrixA.mat_cols(); ++k)
+ result[i][j] += matrixA[i][k] * matrixB[k][j];
+ }
+
+ return result;
+ }
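+
+ // Note (sketch): the binary operators above promote the scalar type via
+ // PromotionTraits, e.g. adding a FieldMatrix<float,2,2> to a
+ // FieldMatrix<double,2,2> yields a FieldMatrix<double,2,2>, and multiplying a
+ // FieldMatrix<double,2,3> by a FieldMatrix<double,3,4> yields a FieldMatrix<double,2,4>.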
+
+ //! Multiplies M from the left to this matrix, this matrix is not modified
+ template<int l>
+ FieldMatrix<K,l,cols> leftmultiplyany (const FieldMatrix<K,l,rows>& M) const
+ {
+ FieldMatrix<K,l,cols> C;
+
+ for (size_type i=0; i<l; i++) {
+ for (size_type j=0; j<cols; j++) {
+ C[i][j] = 0;
+ for (size_type k=0; k<rows; k++)
+ C[i][j] += M[i][k]*(*this)[k][j];
+ }
+ }
+ return C;
+ }
+
+ using Base::rightmultiply;
+
+ //! Multiplies M from the right to this matrix
+ template <int r, int c>
+ FieldMatrix& rightmultiply (const FieldMatrix<K,r,c>& M)
+ {
+ static_assert(r == c, "Cannot rightmultiply with non-square matrix");
+ static_assert(r == cols, "Size mismatch");
+ FieldMatrix<K,rows,cols> C(*this);
+
+ for (size_type i=0; i<rows; i++)
+ for (size_type j=0; j<cols; j++) {
+ (*this)[i][j] = 0;
+ for (size_type k=0; k<cols; k++)
+ (*this)[i][j] += C[i][k]*M[k][j];
+ }
+ return *this;
+ }
+
+ //! Multiplies M from the right to this matrix, this matrix is not modified
+ template<int l>
+ FieldMatrix<K,rows,l> rightmultiplyany (const FieldMatrix<K,cols,l>& M) const
+ {
+ FieldMatrix<K,rows,l> C;
+
+ for (size_type i=0; i<rows; i++) {
+ for (size_type j=0; j<l; j++) {
+ C[i][j] = 0;
+ for (size_type k=0; k<cols; k++)
+ C[i][j] += (*this)[i][k]*M[k][j];
+ }
+ }
+ return C;
+ }
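+
+ // Usage sketch: for FieldMatrix<double,2,3> A and a square FieldMatrix<double,3,3> B,
+ // A.rightmultiply(B) modifies A in place, while A.rightmultiplyany(C) with a
+ // FieldMatrix<double,3,4> C returns a new FieldMatrix<double,2,4> and leaves A
+ // unchanged; leftmultiplyany works analogously from the left.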
+
+ // make this thing a matrix
+ static constexpr size_type mat_rows() { return ROWS; }
+ static constexpr size_type mat_cols() { return COLS; }
+
+ row_reference mat_access ( size_type i )
+ {
+ DUNE_ASSERT_BOUNDS(i < ROWS);
+ return _data[i];
+ }
+
+ const_row_reference mat_access ( size_type i ) const
+ {
+ DUNE_ASSERT_BOUNDS(i < ROWS);
+ return _data[i];
+ }
+ };
+
+#ifndef DOXYGEN // hide specialization
+ /** \brief Special type for 1x1 matrices
+ */
+ template<class K>
+ class FieldMatrix<K,1,1> : public DenseMatrix< FieldMatrix<K,1,1> >
+ {
+ FieldVector<K,1> _data;
+ typedef DenseMatrix< FieldMatrix<K,1,1> > Base;
+ public:
+ // standard constructor and everything is sufficient ...
+
+ //===== type definitions and constants
+
+ //! The type used for index access and size operations
+ typedef typename Base::size_type size_type;
+
+ //! We are at the leaf of the block recursion
+ enum {
+ //! The number of block levels we contain.
+ //! This is always one for this type.
+ blocklevel = 1
+ };
+
+ typedef typename Base::row_type row_type;
+
+ typedef typename Base::row_reference row_reference;
+ typedef typename Base::const_row_reference const_row_reference;
+
+ //! export size
+ enum {
+ //! \brief The number of rows.
+ //! This is always one for this type.
+ rows = 1,
+ //! \brief The number of columns.
+ //! This is always one for this type.
+ cols = 1
+ };
+
+ //===== constructors
+ /** \brief Default constructor
+ */
+ constexpr FieldMatrix() = default;
+
+ /** \brief Constructor initializing the matrix from a list of row vectors
+ */
+ FieldMatrix(std::initializer_list<Dune::FieldVector<K, 1>> const &l)
+ {
+ std::copy_n(l.begin(), std::min(static_cast< std::size_t >( 1 ), l.size()), &_data);
+ }
+
+ template <class T,
+ typename = std::enable_if_t<HasDenseMatrixAssigner<FieldMatrix, T>::value>>
+ FieldMatrix(T const& rhs)
+ {
+ *this = rhs;
+ }
+
+ using Base::operator=;
+
+ //! vector space addition -- two-argument version
+ template <class OtherScalar>
+ friend auto operator+ ( const FieldMatrix& matrixA,
+ const FieldMatrix<OtherScalar,1,1>& matrixB)
+ {
+ return FieldMatrix<typename PromotionTraits<K,OtherScalar>::PromotedType,1,1>{matrixA[0][0] + matrixB[0][0]};
+ }
+
+ //! Binary addition when treating FieldMatrix<K,1,1> like K
+ template <class Scalar,
+ std::enable_if_t<IsNumber<Scalar>::value, int> = 0>
+ friend auto operator+ ( const FieldMatrix& matrix,
+ const Scalar& scalar)
+ {
+ return FieldMatrix<typename PromotionTraits<K,Scalar>::PromotedType,1,1>{matrix[0][0] + scalar};
+ }
+
+ //! Binary addition when treating FieldMatrix<K,1,1> like K
+ template <class Scalar,
+ std::enable_if_t<IsNumber<Scalar>::value, int> = 0>
+ friend auto operator+ ( const Scalar& scalar,
+ const FieldMatrix& matrix)
+ {
+ return FieldMatrix<typename PromotionTraits<Scalar,K>::PromotedType,1,1>{scalar + matrix[0][0]};
+ }
+
+ //! vector space subtraction -- two-argument version
+ template <class OtherScalar>
+ friend auto operator- ( const FieldMatrix& matrixA,
+ const FieldMatrix<OtherScalar,1,1>& matrixB)
+ {
+ return FieldMatrix<typename PromotionTraits<K,OtherScalar>::PromotedType,1,1>{matrixA[0][0] - matrixB[0][0]};
+ }
+
+ //! Binary subtraction when treating FieldMatrix<K,1,1> like K
+ template <class Scalar,
+ std::enable_if_t<IsNumber<Scalar>::value, int> = 0>
+ friend auto operator- ( const FieldMatrix& matrix,
+ const Scalar& scalar)
+ {
+ return FieldMatrix<typename PromotionTraits<K,Scalar>::PromotedType,1,1>{matrix[0][0] - scalar};
+ }
+
+ //! Binary subtraction when treating FieldMatrix<K,1,1> like K
+ template <class Scalar,
+ std::enable_if_t<IsNumber<Scalar>::value, int> = 0>
+ friend auto operator- ( const Scalar& scalar,
+ const FieldMatrix& matrix)
+ {
+ return FieldMatrix<typename PromotionTraits<Scalar,K>::PromotedType,1,1>{scalar - matrix[0][0]};
+ }
+
+ //! vector space multiplication with scalar
+ template <class Scalar,
+ std::enable_if_t<IsNumber<Scalar>::value, int> = 0>
+ friend auto operator* ( const FieldMatrix& matrix, Scalar scalar)
+ {
+ return FieldMatrix<typename PromotionTraits<K,Scalar>::PromotedType,1,1> {matrix[0][0] * scalar};
+ }
+
+ //! vector space multiplication with scalar
+ template <class Scalar,
+ std::enable_if_t<IsNumber<Scalar>::value, int> = 0>
+ friend auto operator* ( Scalar scalar, const FieldMatrix& matrix)
+ {
+ return FieldMatrix<typename PromotionTraits<K,Scalar>::PromotedType,1,1> {scalar * matrix[0][0]};
+ }
+
+ //! vector space division by scalar
+ template <class Scalar,
+ std::enable_if_t<IsNumber<Scalar>::value, int> = 0>
+ friend auto operator/ ( const FieldMatrix& matrix, Scalar scalar)
+ {
+ return FieldMatrix<typename PromotionTraits<K,Scalar>::PromotedType,1,1> {matrix[0][0] / scalar};
+ }
+
+ //===== solve
+
+ /** \brief Matrix-matrix multiplication
+ */
+ template <class OtherScalar, int otherCols>
+ friend auto operator* ( const FieldMatrix& matrixA,
+ const FieldMatrix<OtherScalar, 1, otherCols>& matrixB)
+ {
+ FieldMatrix<typename PromotionTraits<K,OtherScalar>::PromotedType,1,otherCols> result;
+
+ for (size_type j = 0; j < matrixB.mat_cols(); ++j)
+ result[0][j] = matrixA[0][0] * matrixB[0][j];
+
+ return result;
+ }
+
+ //! Multiplies M from the left to this matrix, this matrix is not modified
+ template<int l>
+ FieldMatrix<K,l,1> leftmultiplyany (const FieldMatrix<K,l,1>& M) const
+ {
+ FieldMatrix<K,l,1> C;
+ for (size_type j=0; j<l; j++)
+ C[j][0] = M[j][0]*(*this)[0][0];
+ return C;
+ }
+
+ //! Multiplies M from the right to this matrix
+ FieldMatrix& rightmultiply (const FieldMatrix& M)
+ {
+ _data[0] *= M[0][0];
+ return *this;
+ }
+
+ //! Multiplies M from the right to this matrix, this matrix is not modified
+ template<int l>
+ FieldMatrix<K,1,l> rightmultiplyany (const FieldMatrix<K,1,l>& M) const
+ {
+ FieldMatrix<K,1,l> C;
+
+ for (size_type j=0; j<l; j++)
+ C[0][j] = M[0][j]*_data[0];
+ return C;
+ }
+
+ // make this thing a matrix
+ static constexpr size_type mat_rows() { return 1; }
+ static constexpr size_type mat_cols() { return 1; }
+
+ row_reference mat_access ([[maybe_unused]] size_type i)
+ {
+ DUNE_ASSERT_BOUNDS(i == 0);
+ return _data;
+ }
+
+ const_row_reference mat_access ([[maybe_unused]] size_type i) const
+ {
+ DUNE_ASSERT_BOUNDS(i == 0);
+ return _data;
+ }
+
+ //! add scalar
+ FieldMatrix& operator+= (const K& k)
+ {
+ _data[0] += k;
+ return (*this);
+ }
+
+ //! subtract scalar
+ FieldMatrix& operator-= (const K& k)
+ {
+ _data[0] -= k;
+ return (*this);
+ }
+
+ //! multiplication with scalar
+ FieldMatrix& operator*= (const K& k)
+ {
+ _data[0] *= k;
+ return (*this);
+ }
+
+ //! division by scalar
+ FieldMatrix& operator/= (const K& k)
+ {
+ _data[0] /= k;
+ return (*this);
+ }
+
+ //===== conversion operator
+
+ operator const K& () const { return _data[0]; }
+
+ };
+
+ /** \brief Sends the matrix to an output stream */
+ template<typename K>
+ std::ostream& operator<< (std::ostream& s, const FieldMatrix<K,1,1>& a)
+ {
+ s << a[0][0];
+ return s;
+ }
+
+#endif // DOXYGEN
+
+ namespace FMatrixHelp {
+
+ //! invert scalar without changing the original matrix
+ template <typename K>
+ static inline K invertMatrix (const FieldMatrix<K,1,1> &matrix, FieldMatrix<K,1,1> &inverse)
+ {
+ using real_type = typename FieldTraits<K>::real_type;
+ inverse[0][0] = real_type(1.0)/matrix[0][0];
+ return matrix[0][0];
+ }
+
+ //! invert scalar without changing the original matrix
+ template <typename K>
+ static inline K invertMatrix_retTransposed (const FieldMatrix<K,1,1> &matrix, FieldMatrix<K,1,1> &inverse)
+ {
+ return invertMatrix(matrix,inverse);
+ }
+
+
+ //! invert 2x2 Matrix without changing the original matrix
+ template <typename K>
+ static inline K invertMatrix (const FieldMatrix<K,2,2> &matrix, FieldMatrix<K,2,2> &inverse)
+ {
+ using real_type = typename FieldTraits<K>::real_type;
+ // code generated by maple
+ K det = (matrix[0][0]*matrix[1][1] - matrix[0][1]*matrix[1][0]);
+ K det_1 = real_type(1.0)/det;
+ inverse[0][0] = matrix[1][1] * det_1;
+ inverse[0][1] = - matrix[0][1] * det_1;
+ inverse[1][0] = - matrix[1][0] * det_1;
+ inverse[1][1] = matrix[0][0] * det_1;
+ return det;
+ }
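+
+ // In closed form: for [[a,b],[c,d]] the inverse is 1/(a*d - b*c) * [[d,-b],[-c,a]].
+ // The determinant is returned unchecked, so callers are responsible for detecting
+ // (nearly) singular matrices.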
+
+ //! invert 2x2 Matrix without changing the original matrix
+ //! return transposed matrix
+ template <typename K>
+ static inline K invertMatrix_retTransposed (const FieldMatrix<K,2,2> &matrix, FieldMatrix<K,2,2> &inverse)
+ {
+ using real_type = typename FieldTraits<K>::real_type;
+ // code generated by maple
+ K det = (matrix[0][0]*matrix[1][1] - matrix[0][1]*matrix[1][0]);
+ K det_1 = real_type(1.0)/det;
+ inverse[0][0] = matrix[1][1] * det_1;
+ inverse[1][0] = - matrix[0][1] * det_1;
+ inverse[0][1] = - matrix[1][0] * det_1;
+ inverse[1][1] = matrix[0][0] * det_1;
+ return det;
+ }
+
+ //! invert 3x3 Matrix without changing the original matrix
+ template <typename K>
+ static inline K invertMatrix (const FieldMatrix<K,3,3> &matrix, FieldMatrix<K,3,3> &inverse)
+ {
+ using real_type = typename FieldTraits<K>::real_type;
+ // code generated by maple
+ K t4 = matrix[0][0] * matrix[1][1];
+ K t6 = matrix[0][0] * matrix[1][2];
+ K t8 = matrix[0][1] * matrix[1][0];
+ K t10 = matrix[0][2] * matrix[1][0];
+ K t12 = matrix[0][1] * matrix[2][0];
+ K t14 = matrix[0][2] * matrix[2][0];
+
+ K det = (t4*matrix[2][2]-t6*matrix[2][1]-t8*matrix[2][2]+
+ t10*matrix[2][1]+t12*matrix[1][2]-t14*matrix[1][1]);
+ K t17 = real_type(1.0)/det;
+
+ inverse[0][0] = (matrix[1][1] * matrix[2][2] - matrix[1][2] * matrix[2][1])*t17;
+ inverse[0][1] = -(matrix[0][1] * matrix[2][2] - matrix[0][2] * matrix[2][1])*t17;
+ inverse[0][2] = (matrix[0][1] * matrix[1][2] - matrix[0][2] * matrix[1][1])*t17;
+ inverse[1][0] = -(matrix[1][0] * matrix[2][2] - matrix[1][2] * matrix[2][0])*t17;
+ inverse[1][1] = (matrix[0][0] * matrix[2][2] - t14) * t17;
+ inverse[1][2] = -(t6-t10) * t17;
+ inverse[2][0] = (matrix[1][0] * matrix[2][1] - matrix[1][1] * matrix[2][0]) * t17;
+ inverse[2][1] = -(matrix[0][0] * matrix[2][1] - t12) * t17;
+ inverse[2][2] = (t4-t8) * t17;
+
+ return det;
+ }
+
+ //! invert 3x3 Matrix without changing the original matrix
+ template <typename K>
+ static inline K invertMatrix_retTransposed (const FieldMatrix<K,3,3> &matrix, FieldMatrix<K,3,3> &inverse)
+ {
+ using real_type = typename FieldTraits<K>::real_type;
+ // code generated by maple
+ K t4 = matrix[0][0] * matrix[1][1];
+ K t6 = matrix[0][0] * matrix[1][2];
+ K t8 = matrix[0][1] * matrix[1][0];
+ K t10 = matrix[0][2] * matrix[1][0];
+ K t12 = matrix[0][1] * matrix[2][0];
+ K t14 = matrix[0][2] * matrix[2][0];
+
+ K det = (t4*matrix[2][2]-t6*matrix[2][1]-t8*matrix[2][2]+
+ t10*matrix[2][1]+t12*matrix[1][2]-t14*matrix[1][1]);
+ K t17 = real_type(1.0)/det;
+
+ inverse[0][0] = (matrix[1][1] * matrix[2][2] - matrix[1][2] * matrix[2][1])*t17;
+ inverse[1][0] = -(matrix[0][1] * matrix[2][2] - matrix[0][2] * matrix[2][1])*t17;
+ inverse[2][0] = (matrix[0][1] * matrix[1][2] - matrix[0][2] * matrix[1][1])*t17;
+ inverse[0][1] = -(matrix[1][0] * matrix[2][2] - matrix[1][2] * matrix[2][0])*t17;
+ inverse[1][1] = (matrix[0][0] * matrix[2][2] - t14) * t17;
+ inverse[2][1] = -(t6-t10) * t17;
+ inverse[0][2] = (matrix[1][0] * matrix[2][1] - matrix[1][1] * matrix[2][0]) * t17;
+ inverse[1][2] = -(matrix[0][0] * matrix[2][1] - t12) * t17;
+ inverse[2][2] = (t4-t8) * t17;
+
+ return det;
+ }
+
+ //! calculates ret = A * B
+ template< class K, int m, int n, int p >
+ static inline void multMatrix ( const FieldMatrix< K, m, n > &A,
+ const FieldMatrix< K, n, p > &B,
+ FieldMatrix< K, m, p > &ret )
+ {
+ typedef typename FieldMatrix< K, m, p > :: size_type size_type;
+
+ for( size_type i = 0; i < m; ++i )
+ {
+ for( size_type j = 0; j < p; ++j )
+ {
+ ret[ i ][ j ] = K( 0 );
+ for( size_type k = 0; k < n; ++k )
+ ret[ i ][ j ] += A[ i ][ k ] * B[ k ][ j ];
+ }
+ }
+ }
+
+ //! calculates ret = A^T * A
+ template <typename K, int rows, int cols>
+ static inline void multTransposedMatrix(const FieldMatrix<K,rows,cols> &matrix, FieldMatrix<K,cols,cols>& ret)
+ {
+ typedef typename FieldMatrix<K,rows,cols>::size_type size_type;
+
+ for(size_type i=0; i<cols; i++)
+ for(size_type j=0; j<cols; j++)
+ {
+ ret[i][j]=0.0;
+ for(size_type k=0; k<rows; k++)
+ ret[i][j]+=matrix[k][i]*matrix[k][j];
+ }
+ }
+
+ using Dune::DenseMatrixHelp::multAssign;
+
+ //! calculates ret = matrix^T * x
+ template <typename K, int rows, int cols>
+ static inline void multAssignTransposed( const FieldMatrix<K,rows,cols> &matrix, const FieldVector<K,rows> & x, FieldVector<K,cols> & ret)
+ {
+ typedef typename FieldMatrix<K,rows,cols>::size_type size_type;
+
+ for(size_type i=0; i<cols; ++i)
+ {
+ ret[i] = 0.0;
+ for(size_type j=0; j<rows; ++j)
+ ret[i] += matrix[j][i]*x[j];
+ }
+ }
+
+ //! calculates ret = matrix * x
+ template <typename K, int rows, int cols>
+ static inline FieldVector<K,rows> mult(const FieldMatrix<K,rows,cols> &matrix, const FieldVector<K,cols> & x)
+ {
+ FieldVector<K,rows> ret;
+ multAssign(matrix,x,ret);
+ return ret;
+ }
+
+ //! calculates ret = matrix^T * x
+ template <typename K, int rows, int cols>
+ static inline FieldVector<K,cols> multTransposed(const FieldMatrix<K,rows,cols> &matrix, const FieldVector<K,rows> & x)
+ {
+ FieldVector<K,cols> ret;
+ multAssignTransposed( matrix, x, ret );
+ return ret;
+ }
+
+ } // end namespace FMatrixHelp
+
+ /** @} end documentation */
+
+} // end namespace
+
+#include "fmatrixev.hh"
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_FMATRIXEIGENVALUES_CC
+#define DUNE_FMATRIXEIGENVALUES_CC
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <iostream>
+#include <cmath>
+#include <cassert>
+
+#include <dune/common/exceptions.hh>
+
+#if HAVE_LAPACK
+
+#ifdef LAPACK_NEEDS_UNDERLINE
+ #define LAPACK_MANGLE(name,NAME) name##_
+#else
+ #define LAPACK_MANGLE(name,NAME) name
+#endif
+
+#define FC_FUNC LAPACK_MANGLE
+
+// symmetric matrices
+#define DSYEV_FORTRAN FC_FUNC (dsyev, DSYEV)
+#define SSYEV_FORTRAN FC_FUNC (ssyev, SSYEV)
+
+// nonsymmetric matrices
+#define DGEEV_FORTRAN FC_FUNC (dgeev, DGEEV)
+#define SGEEV_FORTRAN FC_FUNC (sgeev, SGEEV)
+
+// dsyev declaration (in liblapack)
+extern "C" {
+
+ /*
+ *
+ ** purpose
+ ** =======
+ **
+ ** xsyev computes all eigenvalues and, optionally, eigenvectors of a
+ ** BASE DATA TYPE symmetric matrix a.
+ **
+ ** arguments
+ ** =========
+ **
+ ** jobz (input) char
+ ** = 'n': compute eigenvalues only;
+ ** = 'v': compute eigenvalues and eigenvectors.
+ **
+ ** uplo (input) char
+ ** = 'u': upper triangle of a is stored;
+ ** = 'l': lower triangle of a is stored.
+ **
+ ** n (input) long int
+ ** the order of the matrix a. n >= 0.
+ **
+ ** a (input/output) BASE DATA TYPE array, dimension (lda, n)
+ ** on entry, the symmetric matrix a. if uplo = 'u', the
+ ** leading n-by-n upper triangular part of a contains the
+ ** upper triangular part of the matrix a. if uplo = 'l',
+ ** the leading n-by-n lower triangular part of a contains
+ ** the lower triangular part of the matrix a.
+ ** on exit, if jobz = 'v', then if info = 0, a contains the
+ ** orthonormal eigenvectors of the matrix a.
+ ** if jobz = 'n', then on exit the lower triangle (if uplo='l')
+ ** or the upper triangle (if uplo='u') of a, including the
+ ** diagonal, is destroyed.
+ **
+ ** lda (input) long int
+ ** the leading dimension of the array a. lda >= max(1,n).
+ **
+ ** w (output) BASE DATA TYPE array, dimension (n)
+ ** if info = 0, the eigenvalues in ascending order.
+ **
+ ** work (workspace/output) DOUBLE PRECISION array, dimension (MAX(1,LWORK))
+ ** On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+ **
+ ** lwork (input) INTEGER
+ ** The length of the array WORK. LWORK >= max(1,3*N-1).
+ ** For optimal efficiency, LWORK >= (NB+2)*N,
+ ** where NB is the blocksize for DSYTRD returned by ILAENV.
+ **
+ ** If LWORK = -1, then a workspace query is assumed; the routine
+ ** only calculates the optimal size of the WORK array, returns
+ ** this value as the first entry of the WORK array, and no error
+ ** message related to LWORK is issued by XERBLA.
+ **
+ **
+ ** info (output) long int
+ ** = 0: successful exit
+ ** < 0: if info = -i, the i-th argument had an illegal value
+ ** > 0: if info = i, the algorithm failed to converge; i
+ ** off-diagonal elements of an intermediate tridiagonal
+ ** form did not converge to zero.
+ **
+ **/
+ extern void DSYEV_FORTRAN(const char* jobz, const char* uplo, const long
+ int* n, double* a, const long int* lda, double* w,
+ double* work, const long int* lwork, long int* info);
+ extern void SSYEV_FORTRAN(const char* jobz, const char* uplo, const long
+ int* n, float* a, const long int* lda, float* w,
+ float* work, const long int* lwork, long int* info);
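+
+ /* Typical call pattern (an illustrative sketch, not taken from this file):
+ first query the optimal workspace size with lwork = -1, then allocate the
+ workspace and run the actual computation, e.g.
+ long int n = 3, lwork = -1, info = 0;
+ double a[9] = { ... }, w[3], wkopt;
+ DSYEV_FORTRAN("v", "u", &n, a, &n, w, &wkopt, &lwork, &info);
+ lwork = (long int)wkopt; // then allocate work[lwork] and call again
+ */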
+
+ /*
+ *
+ ** purpose
+ ** =======
+ **
+ ** xgeev computes for an N-by-N BASE DATA TYPE nonsymmetric matrix A, the
+ ** eigenvalues and, optionally, the left and/or right eigenvectors.
+ **
+ ** The right eigenvector v(j) of A satisfies
+ ** A * v(j) = lambda(j) * v(j)
+ ** where lambda(j) is its eigenvalue.
+ ** The left eigenvector u(j) of A satisfies
+ ** u(j)**T * A = lambda(j) * u(j)**T
+ ** where u(j)**T denotes the transpose of u(j).
+ **
+ ** The computed eigenvectors are normalized to have Euclidean norm
+ ** equal to 1 and largest component real.
+ **
+ ** arguments
+ ** =========
+ **
+ ** jobvl (input) char
+ ** = 'n': left eigenvectors of a are not computed;
+ ** = 'v': left eigenvectors of a are computed.
+ **
+ ** jobvr (input) char
+ ** = 'n': right eigenvectors of a are not computed;
+ ** = 'v': right eigenvectors of a are computed.
+ **
+ ** n (input) long int
+ ** the order of the matrix a. n >= 0.
+ **
+ ** a (input/output) BASE DATA TYPE array, dimension (lda,n)
+ ** on entry, the n-by-n matrix a.
+ ** on exit, a has been overwritten.
+ **
+ ** lda (input) long int
+ ** the leading dimension of the array a. lda >= max(1,n).
+ **
+ ** wr (output) BASE DATA TYPE array, dimension (n)
+ ** wi (output) BASE DATA TYPE array, dimension (n)
+ ** wr and wi contain the real and imaginary parts,
+ ** respectively, of the computed eigenvalues. complex
+ ** conjugate pairs of eigenvalues appear consecutively
+ ** with the eigenvalue having the positive imaginary part
+ ** first.
+ **
+ ** vl (output) COMPLEX DATA TYPE array, dimension (ldvl,n)
+ ** if jobvl = 'v', the left eigenvectors u(j) are stored one
+ ** after another in the columns of vl, in the same order
+ ** as their eigenvalues.
+ ** if jobvl = 'n', vl is not referenced.
+ ** if the j-th eigenvalue is real, then u(j) = vl(:,j),
+ ** the j-th column of vl.
+ ** if the j-th and (j+1)-st eigenvalues form a complex
+ ** conjugate pair, then u(j) = vl(:,j) + i*vl(:,j+1) and
+ ** u(j+1) = vl(:,j) - i*vl(:,j+1).
+ **
+ ** ldvl (input) long int
+ ** the leading dimension of the array vl. ldvl >= 1; if
+ ** jobvl = 'v', ldvl >= n.
+ **
+ ** vr (output) COMPLEX DATA TYPE array, dimension (ldvr,n)
+ ** if jobvr = 'v', the right eigenvectors v(j) are stored one
+ ** after another in the columns of vr, in the same order
+ ** as their eigenvalues.
+ ** if jobvr = 'n', vr is not referenced.
+ ** if the j-th eigenvalue is real, then v(j) = vr(:,j),
+ ** the j-th column of vr.
+ ** if the j-th and (j+1)-st eigenvalues form a complex
+ ** conjugate pair, then v(j) = vr(:,j) + i*vr(:,j+1) and
+ ** v(j+1) = vr(:,j) - i*vr(:,j+1).
+ **
+ ** ldvr (input) long int
+ ** the leading dimension of the array vr. ldvr >= 1; if
+ ** jobvr = 'v', ldvr >= n.
+ **
+ ** work (workspace/output) BASE DATA TYPE array, dimension (max(1,lwork))
+ ** on exit, if info = 0, work(1) returns the optimal lwork.
+ **
+ ** lwork (input) long int
+ ** the dimension of the array work. lwork >= max(1,3*n), and
+ ** if jobvl = 'v' or jobvr = 'v', lwork >= 4*n. for good
+ ** performance, lwork must generally be larger.
+ **
+ ** if lwork = -1, then a workspace query is assumed; the routine
+ ** only calculates the optimal size of the work array, returns
+ ** this value as the first entry of the work array, and no error
+ ** message related to lwork is issued by xerbla.
+ **
+ ** info (output) long int
+ ** = 0: successful exit
+ ** < 0: if info = -i, the i-th argument had an illegal value.
+ ** > 0: if info = i, the qr algorithm failed to compute all the
+ ** eigenvalues, and no eigenvectors have been computed;
+ ** elements i+1:n of wr and wi contain eigenvalues which
+ ** have converged.
+ **
+ **/
+
+ extern void DGEEV_FORTRAN(const char* jobvl, const char* jobvr, const long
+ int* n, double* a, const long int* lda, double* wr, double* wi, double* vl,
+ const long int* ldvl, double* vr, const long int* ldvr, double* work,
+ const long int* lwork, long int* info);
+ extern void SGEEV_FORTRAN(const char* jobvl, const char* jobvr, const long
+ int* n, float* a, const long int* lda, float* wr, float* wi, float* vl,
+ const long int* ldvl, float* vr, const long int* ldvr, float* work,
+ const long int* lwork, long int* info);
+
+} // end extern C
+
+namespace Dune {
+
+ namespace FMatrixHelp {
+
+ void eigenValuesLapackCall(
+ const char* jobz, const char* uplo, const long
+ int* n, double* a, const long int* lda, double* w,
+ double* work, const long int* lwork, long int* info)
+ {
+ // call LAPACK dsyev
+ DSYEV_FORTRAN(jobz, uplo, n, a, lda, w, work, lwork, info);
+ }
+
+ void eigenValuesLapackCall(
+ const char* jobz, const char* uplo, const long
+ int* n, float* a, const long int* lda, float* w,
+ float* work, const long int* lwork, long int* info)
+ {
+ // call LAPACK ssyev (single precision)
+ SSYEV_FORTRAN(jobz, uplo, n, a, lda, w, work, lwork, info);
+ }
+
+ void eigenValuesNonsymLapackCall(
+ const char* jobvl, const char* jobvr, const long
+ int* n, double* a, const long int* lda, double* wr, double* wi, double* vl,
+ const long int* ldvl, double* vr, const long int* ldvr, double* work,
+ const long int* lwork, long int* info)
+ {
+ // call LAPACK dgeev
+ DGEEV_FORTRAN(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr,
+ work, lwork, info);
+ }
+
+ void eigenValuesNonsymLapackCall(
+ const char* jobvl, const char* jobvr, const long
+ int* n, float* a, const long int* lda, float* wr, float* wi, float* vl,
+ const long int* ldvl, float* vr, const long int* ldvr, float* work,
+ const long int* lwork, long int* info)
+ {
+ // call LAPACK sgeev (single precision)
+ SGEEV_FORTRAN(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr,
+ work, lwork, info);
+ }
+ } // end namespace FMatrixHelp
+} // end namespace Dune
+
+#endif // #if HAVE_LAPACK
+#endif // #ifndef DUNE_FMATRIXEIGENVALUES_CC
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_FMATRIXEIGENVALUES_HH
+#define DUNE_FMATRIXEIGENVALUES_HH
+
+/** \file
+ * \brief Eigenvalue computations for the FieldMatrix class
+ */
+
+#include <algorithm>
+#include <iostream>
+#include <cmath>
+#include <cassert>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/math.hh>
+
+namespace Dune {
+
+ /**
+ @addtogroup DenseMatVec
+ @{
+ */
+
+ namespace FMatrixHelp {
+
+#if HAVE_LAPACK
+ // defined in fmatrixev.cc
+ extern void eigenValuesLapackCall(
+ const char* jobz, const char* uplo, const long
+ int* n, double* a, const long int* lda, double* w,
+ double* work, const long int* lwork, long int* info);
+
+ extern void eigenValuesNonsymLapackCall(
+ const char* jobvl, const char* jobvr, const long
+ int* n, double* a, const long int* lda, double* wr, double* wi, double* vl,
+ const long int* ldvl, double* vr, const long int* ldvr, double* work,
+ const long int* lwork, long int* info);
+
+ extern void eigenValuesLapackCall(
+ const char* jobz, const char* uplo, const long
+ int* n, float* a, const long int* lda, float* w,
+ float* work, const long int* lwork, long int* info);
+
+ extern void eigenValuesNonsymLapackCall(
+ const char* jobvl, const char* jobvr, const long
+ int* n, float* a, const long int* lda, float* wr, float* wi, float* vl,
+ const long int* ldvl, float* vr, const long int* ldvr, float* work,
+ const long int* lwork, long int* info);
+
+#endif
+
+ namespace Impl {
+ //internal tag to activate/disable code for eigenvector calculation at compile time
+ enum Jobs { OnlyEigenvalues=0, EigenvaluesEigenvectors=1 };
+
+ //internal dummy used if only eigenvalues are to be calculated
+ template<typename K, int dim>
+ using EVDummy = FieldMatrix<K, dim, dim>;
+
+ //compute the cross-product of two vectors
+ template<typename K>
+ inline FieldVector<K,3> crossProduct(const FieldVector<K,3>& vec0, const FieldVector<K,3>& vec1) {
+ return {vec0[1]*vec1[2] - vec0[2]*vec1[1], vec0[2]*vec1[0] - vec0[0]*vec1[2], vec0[0]*vec1[1] - vec0[1]*vec1[0]};
+ }
+
+ template <typename K>
+ static void eigenValues2dImpl(const FieldMatrix<K, 2, 2>& matrix,
+ FieldVector<K, 2>& eigenvalues)
+ {
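+ // Derivation (sketch): for A = [[a,b],[c,d]] the characteristic polynomial is
+ // lambda^2 - (a+d)*lambda + (a*d - b*c) = 0, hence
+ // lambda_{1,2} = (a+d)/2 -/+ sqrt( ((a-d)/2)^2 + b*c ).
+ // Below, p = (a+d)/2, p2 = (a-d)/2 and q = p2^2 + b*c.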
+ using std::sqrt;
+ const K p = 0.5 * (matrix[0][0] + matrix [1][1]);
+ const K p2 = p - matrix[1][1];
+ K q = p2 * p2 + matrix[1][0] * matrix[0][1];
+ if( q < 0 && q > -1e-14 ) q = 0;
+ if (q < 0)
+ {
+ std::cout << matrix << std::endl;
+ // Complex eigenvalues are either caused by non-symmetric matrices or by round-off errors
+ DUNE_THROW(MathError, "Complex eigenvalue detected (which this implementation cannot handle).");
+ }
+
+ // get square root
+ q = sqrt(q);
+
+ // store eigenvalues in ascending order
+ eigenvalues[0] = p - q;
+ eigenvalues[1] = p + q;
+ }
+
+ /*
+ This implementation was adapted from the pseudo-code (Python?) implementation found on
+ http://en.wikipedia.org/wiki/Eigenvalue_algorithm (retrieved late August 2014).
+ Wikipedia claims to have taken it from
+ Smith, Oliver K. (April 1961), Eigenvalues of a symmetric 3 × 3 matrix.,
+ Communications of the ACM 4 (4): 168, doi:10.1145/355578.366316
+ */
+ template <typename K>
+ static K eigenValues3dImpl(const FieldMatrix<K, 3, 3>& matrix,
+ FieldVector<K, 3>& eigenvalues)
+ {
+ using std::sqrt;
+ using std::acos;
+ using real_type = typename FieldTraits<K>::real_type;
+ const K pi = MathematicalConstants<K>::pi();
+ K p1 = matrix[0][1]*matrix[0][1] + matrix[0][2]*matrix[0][2] + matrix[1][2]*matrix[1][2];
+
+ if (p1 <= std::numeric_limits<K>::epsilon()) {
+ // A is diagonal.
+ eigenvalues[0] = matrix[0][0];
+ eigenvalues[1] = matrix[1][1];
+ eigenvalues[2] = matrix[2][2];
+ std::sort(eigenvalues.begin(), eigenvalues.end());
+
+ return 0.0;
+ }
+ else
+ {
+ // q = trace(A)/3
+ K q = 0;
+ for (int i=0; i<3; i++)
+ q += matrix[i][i] / 3.0;
+
+ K p2 = (matrix[0][0] - q)*(matrix[0][0] - q) + (matrix[1][1] - q)*(matrix[1][1] - q) + (matrix[2][2] - q)*(matrix[2][2] - q) + 2.0 * p1;
+ K p = sqrt(p2 / 6);
+ // B = (1 / p) * (A - q * I); // I is the identity matrix
+ FieldMatrix<K,3,3> B;
+ for (int i=0; i<3; i++)
+ for (int j=0; j<3; j++)
+ B[i][j] = (real_type(1.0)/p) * (matrix[i][j] - q*(i==j));
+
+ K r = B.determinant() / 2.0;
+
+ /*In exact arithmetic for a symmetric matrix -1 <= r <= 1
+ but computation error can leave it slightly outside this range.
+ acos(z) function requires |z| <= 1, but will fail silently
+ and return NaN if the input is larger than 1 in magnitude.
+ Thus r is clamped to [-1,1].*/
+ r = std::min<K>(std::max<K>(r, -1.0), 1.0);
+ K phi = acos(r) / 3.0;
+
+ // the eigenvalues satisfy eig[2] <= eig[1] <= eig[0]
+ eigenvalues[2] = q + 2 * p * cos(phi);
+ eigenvalues[0] = q + 2 * p * cos(phi + (2*pi/3));
+ eigenvalues[1] = 3 * q - eigenvalues[0] - eigenvalues[2]; // since trace(matrix) = eig1 + eig2 + eig3
+
+ return r;
+ }
+ }
+
+ //see https://www.geometrictools.com/Documentation/RobustEigenSymmetric3x3.pdf
+ //Robustly compute a right-handed orthonormal set {u, v, evec0}.
+ template<typename K>
+ void orthoComp(const FieldVector<K,3>& evec0, FieldVector<K,3>& u, FieldVector<K,3>& v) {
+ if(std::abs(evec0[0]) > std::abs(evec0[1])) {
+ //The component of maximum absolute value is either evec0[0] or evec0[2].
+ FieldVector<K,2> temp = {evec0[0], evec0[2]};
+ auto L = 1.0 / temp.two_norm();
+ u = L * FieldVector<K,3>({-evec0[2], 0.0, evec0[0]});
+ }
+ else {
+ //The component of maximum absolute value is either evec0[1] or evec0[2].
+ FieldVector<K,2> temp = {evec0[1], evec0[2]};
+ auto L = 1.0 / temp.two_norm();
+ u = L * FieldVector<K,3>({0.0, evec0[2], -evec0[1]});
+ }
+ v = crossProduct(evec0, u);
+ }
+
+ //see https://www.geometrictools.com/Documentation/RobustEigenSymmetric3x3.pdf
+ template<typename K>
+ void eig0(const FieldMatrix<K,3,3>& matrix, K eval0, FieldVector<K,3>& evec0) {
+ /* Compute a unit-length eigenvector for eigenvalue[i0]. The
+ matrix is rank 2, so two of the rows are linearly independent.
+ For a robust computation of the eigenvector, select the two
+ rows whose cross product has largest length of all pairs of
+ rows. */
+ using Vector = FieldVector<K,3>;
+ Vector row0 = {matrix[0][0]-eval0, matrix[0][1], matrix[0][2]};
+ Vector row1 = {matrix[1][0], matrix[1][1]-eval0, matrix[1][2]};
+ Vector row2 = {matrix[2][0], matrix[2][1], matrix[2][2]-eval0};
+
+ Vector r0xr1 = crossProduct(row0, row1);
+ Vector r0xr2 = crossProduct(row0, row2);
+ Vector r1xr2 = crossProduct(row1, row2);
+ auto d0 = r0xr1.two_norm();
+ auto d1 = r0xr2.two_norm();
+ auto d2 = r1xr2.two_norm();
+
+ auto dmax = d0 ;
+ int imax = 0;
+ if(d1>dmax) {
+ dmax = d1;
+ imax = 1;
+ }
+ if(d2>dmax)
+ imax = 2;
+
+ if(imax == 0)
+ evec0 = r0xr1 / d0;
+ else if(imax == 1)
+ evec0 = r0xr2 / d1;
+ else
+ evec0 = r1xr2 / d2;
+ }
+
+ //see https://www.geometrictools.com/Documentation/RobustEigenSymmetric3x3.pdf
+ template<typename K>
+ void eig1(const FieldMatrix<K,3,3>& matrix, const FieldVector<K,3>& evec0, FieldVector<K,3>& evec1, K eval1) {
+ using Vector = FieldVector<K,3>;
+
+ //Robustly compute a right-handed orthonormal set {u, v, evec0}.
+ Vector u,v;
+ orthoComp(evec0, u, v);
+
+ /* Let e be eval1 and let E be a corresponding eigenvector which
+ is a solution to the linear system (A - e*I)*E = 0. The matrix
+ (A - e*I) is 3x3, not invertible (so infinitely many
+ solutions), and has rank 2 when eval1 and eval2 are different.
+ It has rank 1 when eval1 and eval2 are equal. Numerically, it
+ is difficult to compute robustly the rank of a matrix. Instead,
+ the 3x3 linear system is reduced to a 2x2 system as follows.
+ Define the 3x2 matrix J = [u,v] whose columns are the u and v
+ computed previously. Define the 2x1 vector X = J*E. The 2x2
+ system is 0 = M * X = (J^T * (A - e*I) * J) * X where J^T is
+ the transpose of J and M = J^T * (A - e*I) * J is a 2x2 matrix.
+ The system may be written as
+ +-                          -+ +-  -+   +- -+
+ | U^T*A*U - e   U^T*A*V      | | x0 |   | 0 |
+ | V^T*A*U       V^T*A*V - e  | | x1 | = | 0 |
+ +-                          -+ +-  -+   +- -+
+ where X has row entries x0 and x1. */
+
+ Vector Au, Av;
+ matrix.mv(u, Au);
+ matrix.mv(v, Av);
+
+ auto m00 = u.dot(Au) - eval1;
+ auto m01 = u.dot(Av);
+ auto m11 = v.dot(Av) - eval1;
+
+ /* For robustness, choose the largest-length row of M to compute
+ the eigenvector. The 2-tuple of coefficients of U and V in the
+ assignments to eigenvector[1] lies on a circle, and U and V are
+ unit length and perpendicular, so eigenvector[1] is unit length
+ (within numerical tolerance). */
+ auto absM00 = std::abs(m00);
+ auto absM01 = std::abs(m01);
+ auto absM11 = std::abs(m11);
+ if(absM00 >= absM11) {
+ auto maxAbsComp = std::max(absM00, absM01);
+ if(maxAbsComp > 0.0) {
+ if(absM00 >= absM01) {
+ m01 /= m00;
+ m00 = 1.0 / std::sqrt(1.0 + m01*m01);
+ m01 *= m00;
+ }
+ else {
+ m00 /= m01;
+ m01 = 1.0 / std::sqrt(1.0 + m00*m00);
+ m00 *= m01;
+ }
+ evec1 = m01*u - m00*v;
+ }
+ else
+ evec1 = u;
+ }
+ else {
+ auto maxAbsComp = std::max(absM11, absM01);
+ if(maxAbsComp > 0.0) {
+ if(absM11 >= absM01) {
+ m01 /= m11;
+ m11 = 1.0 / std::sqrt(1.0 + m01*m01);
+ m01 *= m11;
+ }
+ else {
+ m11 /= m01;
+ m01 = 1.0 / std::sqrt(1.0 + m11*m11);
+ m11 *= m01;
+ }
+ evec1 = m11*u - m01*v;
+ }
+ else
+ evec1 = u;
+ }
+ }
+
+ // 1d specialization
+ template<Jobs Tag, typename K>
+ static void eigenValuesVectorsImpl(const FieldMatrix<K, 1, 1>& matrix,
+ FieldVector<K, 1>& eigenValues,
+ FieldMatrix<K, 1, 1>& eigenVectors)
+ {
+ eigenValues[0] = matrix[0][0];
+ if constexpr(Tag==EigenvaluesEigenvectors)
+ eigenVectors[0] = {1.0};
+ }
+
+
+ // 2d specialization
+ template <Jobs Tag, typename K>
+ static void eigenValuesVectorsImpl(const FieldMatrix<K, 2, 2>& matrix,
+ FieldVector<K, 2>& eigenValues,
+ FieldMatrix<K, 2, 2>& eigenVectors)
+ {
+ // Compute eigen values
+ Impl::eigenValues2dImpl(matrix, eigenValues);
+
+ // Compute eigenvectors by exploiting the Cayley–Hamilton theorem.
+ // If λ_1, λ_2 are the eigenvalues, then (A - λ_1I )(A - λ_2I ) = (A - λ_2I )(A - λ_1I ) = 0,
+ // so the columns of (A - λ_2I ) are annihilated by (A - λ_1I ) and vice versa.
+ // Assuming neither matrix is zero, the columns of each must include eigenvectors
+ // for the other eigenvalue. (If either matrix is zero, then A is a multiple of the
+ // identity and any non-zero vector is an eigenvector.)
+ // From: https://en.wikipedia.org/wiki/Eigenvalue_algorithm#2x2_matrices
+ if constexpr(Tag==EigenvaluesEigenvectors) {
+
+ // Special casing for multiples of the identity
+ FieldMatrix<K,2,2> temp = matrix;
+ temp[0][0] -= eigenValues[0];
+ temp[1][1] -= eigenValues[0];
+ if(temp.infinity_norm() <= 1e-14) {
+ eigenVectors[0] = {1.0, 0.0};
+ eigenVectors[1] = {0.0, 1.0};
+ }
+ else {
+ // The columns of A - λ_2I are eigenvectors for λ_1, or zero.
+ // Take the column with the larger norm to avoid zero columns.
+ FieldVector<K,2> ev0 = {matrix[0][0]-eigenValues[1], matrix[1][0]};
+ FieldVector<K,2> ev1 = {matrix[0][1], matrix[1][1]-eigenValues[1]};
+ eigenVectors[0] = (ev0.two_norm2() >= ev1.two_norm2()) ? ev0/ev0.two_norm() : ev1/ev1.two_norm();
+
+ // The columns of A - λ_1I are eigenvectors for λ_2, or zero.
+ // Take the column with the larger norm to avoid zero columns.
+ ev0 = {matrix[0][0]-eigenValues[0], matrix[1][0]};
+ ev1 = {matrix[0][1], matrix[1][1]-eigenValues[0]};
+ eigenVectors[1] = (ev0.two_norm2() >= ev1.two_norm2()) ? ev0/ev0.two_norm() : ev1/ev1.two_norm();
+ }
+ }
+ }
+
+ // 3d specialization
+ template <Jobs Tag, typename K>
+ static void eigenValuesVectorsImpl(const FieldMatrix<K, 3, 3>& matrix,
+ FieldVector<K, 3>& eigenValues,
+ FieldMatrix<K, 3, 3>& eigenVectors)
+ {
+ using Vector = FieldVector<K,3>;
+ using Matrix = FieldMatrix<K,3,3>;
+
+ //compute eigenvalues
+ /* Precondition the matrix by factoring out the maximum absolute
+ value of the components. This guards against floating-point
+ overflow when computing the eigenvalues.*/
+ using std::isnormal;
+ K maxAbsElement = (isnormal(matrix.infinity_norm())) ? matrix.infinity_norm() : K(1.0);
+ Matrix scaledMatrix = matrix / maxAbsElement;
+ K r = Impl::eigenValues3dImpl(scaledMatrix, eigenValues);
+
+ if constexpr(Tag==EigenvaluesEigenvectors) {
+ K offDiagNorm = Vector{scaledMatrix[0][1],scaledMatrix[0][2],scaledMatrix[1][2]}.two_norm2();
+ if (offDiagNorm <= std::numeric_limits<K>::epsilon())
+ {
+ eigenValues = {scaledMatrix[0][0], scaledMatrix[1][1], scaledMatrix[2][2]};
+ eigenVectors = {{1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}, {0.0, 0.0, 1.0}};
+
+ // Use bubble sort to jointly sort eigenvalues and eigenvectors
+ // such that eigenvalues are ascending
+ if (eigenValues[0] > eigenValues[1])
+ {
+ std::swap(eigenValues[0], eigenValues[1]);
+ std::swap(eigenVectors[0], eigenVectors[1]);
+ }
+ if (eigenValues[1] > eigenValues[2])
+ {
+ std::swap(eigenValues[1], eigenValues[2]);
+ std::swap(eigenVectors[1], eigenVectors[2]);
+ }
+ if (eigenValues[0] > eigenValues[1])
+ {
+ std::swap(eigenValues[0], eigenValues[1]);
+ std::swap(eigenVectors[0], eigenVectors[1]);
+ }
+ }
+ else {
+ /*Compute the eigenvectors so that the set
+ [evec[0], evec[1], evec[2]] is right handed and
+ orthonormal. */
+
+ Matrix evec(0.0);
+ Vector eval(eigenValues);
+ if(r >= 0) {
+ Impl::eig0(scaledMatrix, eval[2], evec[2]);
+ Impl::eig1(scaledMatrix, evec[2], evec[1], eval[1]);
+ evec[0] = Impl::crossProduct(evec[1], evec[2]);
+ }
+ else {
+ Impl::eig0(scaledMatrix, eval[0], evec[0]);
+ Impl::eig1(scaledMatrix, evec[0], evec[1], eval[1]);
+ evec[2] = Impl::crossProduct(evec[0], evec[1]);
+ }
+ //sort eval/evec-pairs in ascending order
+ using EVPair = std::pair<K, Vector>;
+ std::vector<EVPair> pairs;
+ for(std::size_t i=0; i<=2; ++i)
+ pairs.push_back(EVPair(eval[i], evec[i]));
+ auto comp = [](EVPair x, EVPair y){ return x.first < y.first; };
+ std::sort(pairs.begin(), pairs.end(), comp);
+ for(std::size_t i=0; i<=2; ++i){
+ eigenValues[i] = pairs[i].first;
+ eigenVectors[i] = pairs[i].second;
+ }
+ }
+ }
+ //The preconditioning scaled the matrix, which scales the eigenvalues. Revert the scaling.
+ eigenValues *= maxAbsElement;
+ }
+
+ // forwarding to LAPACK with corresponding tag
+ template <Jobs Tag, int dim, typename K>
+ static void eigenValuesVectorsLapackImpl(const FieldMatrix<K, dim, dim>& matrix,
+ FieldVector<K, dim>& eigenValues,
+ FieldMatrix<K, dim, dim>& eigenVectors)
+ {
+ {
+#if HAVE_LAPACK
+ /* LAPACK uses a job character to determine whether both eigenvalues and
+ -vectors ('v') or only eigenvalues ('n') should be calculated */
+ const char jobz = "nv"[Tag];
+
+ const long int N = dim ;
+ const char uplo = 'u'; // use upper triangular matrix
+
+ // length of matrix vector, LWORK >= max(1,3*N-1)
+ const long int lwork = 3*N -1 ;
+
+ constexpr bool isKLapackType = std::is_same_v<K,double> || std::is_same_v<K,float>;
+ using LapackNumType = std::conditional_t<isKLapackType, K, double>;
+
+ // matrix to put into dsyev
+ LapackNumType matrixVector[dim * dim];
+
+ // copy matrix
+ int row = 0;
+ for(int i=0; i<dim; ++i)
+ {
+ for(int j=0; j<dim; ++j, ++row)
+ {
+ matrixVector[ row ] = matrix[ i ][ j ];
+ }
+ }
+
+ // working memory
+ LapackNumType workSpace[lwork];
+
+ // return value information
+ long int info = 0;
+ LapackNumType* ev;
+ if constexpr (isKLapackType){
+ ev = &eigenValues[0];
+ }else{
+ ev = new LapackNumType[dim];
+ }
+
+ // call LAPACK routine (see fmatrixev.cc)
+ eigenValuesLapackCall(&jobz, &uplo, &N, &matrixVector[0], &N,
+ ev, &workSpace[0], &lwork, &info);
+
+ if constexpr (!isKLapackType){
+ for(size_t i=0;i<dim;++i)
+ eigenValues[i] = ev[i];
+ delete[] ev;
+ }
+
+ // restore eigenvectors matrix
+ if (Tag==Jobs::EigenvaluesEigenvectors){
+ row = 0;
+ for(int i=0; i<dim; ++i)
+ {
+ for(int j=0; j<dim; ++j, ++row)
+ {
+ eigenVectors[ i ][ j ] = matrixVector[ row ];
+ }
+ }
+ }
+
+ if( info != 0 )
+ {
+ std::cerr << "For matrix " << matrix << " eigenvalue calculation failed! " << std::endl;
+ DUNE_THROW(InvalidStateException,"eigenValues: Eigenvalue calculation failed!");
+ }
+#else
+ DUNE_THROW(NotImplemented,"LAPACK not found!");
+#endif
+ }
+ }
+
+ // generic specialization
+ template <Jobs Tag, int dim, typename K>
+ static void eigenValuesVectorsImpl(const FieldMatrix<K, dim, dim>& matrix,
+ FieldVector<K, dim>& eigenValues,
+ FieldMatrix<K, dim, dim>& eigenVectors)
+ {
+ eigenValuesVectorsLapackImpl<Tag>(matrix,eigenValues,eigenVectors);
+ }
+ } //namespace Impl
+
+ /** \brief calculates the eigenvalues of a symmetric field matrix
+ \param[in] matrix matrix eigenvalues are calculated for
+ \param[out] eigenValues FieldVector that contains eigenvalues in
+ ascending order
+
+ \note specializations for dim=1,2,3 exist, for dim>3 LAPACK::dsyev is used
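+
+ \code
+ // usage sketch with illustrative values
+ Dune::FieldMatrix<double,3,3> A = {{2.,-1.,0.},{-1.,2.,-1.},{0.,-1.,2.}};
+ Dune::FieldVector<double,3> lambda;
+ Dune::FMatrixHelp::eigenValues(A, lambda); // lambda sorted ascending
+ \endcode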
+ */
+ template <int dim, typename K>
+ static void eigenValues(const FieldMatrix<K, dim, dim>& matrix,
+ FieldVector<K ,dim>& eigenValues)
+ {
+ Impl::EVDummy<K,dim> dummy;
+ Impl::eigenValuesVectorsImpl<Impl::Jobs::OnlyEigenvalues>(matrix, eigenValues, dummy);
+ }
+
+ /** \brief calculates the eigenvalues and eigenvectors of a symmetric field matrix
+ \param[in] matrix matrix eigenvalues are calculated for
+ \param[out] eigenValues FieldVector that contains eigenvalues in
+ ascending order
+ \param[out] eigenVectors FieldMatrix that contains the eigenvectors
+
+ \note specializations for dim=1,2,3 exist, for dim>3 LAPACK::dsyev is used
+ */
+ template <int dim, typename K>
+ static void eigenValuesVectors(const FieldMatrix<K, dim, dim>& matrix,
+ FieldVector<K ,dim>& eigenValues,
+ FieldMatrix<K, dim, dim>& eigenVectors)
+ {
+ Impl::eigenValuesVectorsImpl<Impl::Jobs::EigenvaluesEigenvectors>(matrix, eigenValues, eigenVectors);
+ }
+
+ /** \brief calculates the eigenvalues of a symmetric field matrix
+ \param[in] matrix matrix eigenvalues are calculated for
+ \param[out] eigenValues FieldVector that contains eigenvalues in
+ ascending order
+
+ \note LAPACK::dsyev is used to calculate the eigenvalues
+ */
+ template <int dim, typename K>
+ static void eigenValuesLapack(const FieldMatrix<K, dim, dim>& matrix,
+ FieldVector<K, dim>& eigenValues)
+ {
+ Impl::EVDummy<K,dim> dummy;
+ Impl::eigenValuesVectorsLapackImpl<Impl::Jobs::OnlyEigenvalues>(matrix, eigenValues, dummy);
+ }
+
+ /** \brief calculates the eigenvalues and -vectors of a symmetric field matrix
+ \param[in] matrix matrix eigenvalues are calculated for
+ \param[out] eigenValues FieldVector that contains eigenvalues in
+ ascending order
+ \param[out] eigenVectors FieldMatrix that contains the eigenvectors
+
+ \note LAPACK::dsyev is used to calculate the eigenvalues and -vectors
+ */
+ template <int dim, typename K>
+ static void eigenValuesVectorsLapack(const FieldMatrix<K, dim, dim>& matrix,
+ FieldVector<K, dim>& eigenValues,
+ FieldMatrix<K, dim, dim>& eigenVectors)
+ {
+ Impl::eigenValuesVectorsLapackImpl<Impl::Jobs::EigenvaluesEigenvectors>(matrix, eigenValues, eigenVectors);
+ }
+
+ /** \brief calculates the eigenvalues of a non-symmetric field matrix
+ \param[in] matrix matrix eigenvalues are calculated for
+ \param[out] eigenValues FieldVector that contains eigenvalues in
+ ascending order
+
+ \note LAPACK::dgeev is used to calculate the eigenvalues
+ */
+ template <int dim, typename K, class C>
+ static void eigenValuesNonSym(const FieldMatrix<K, dim, dim>& matrix,
+ FieldVector<C, dim>& eigenValues)
+ {
+#if HAVE_LAPACK
+ {
+ const long int N = dim ;
+ const char jobvl = 'n';
+ const char jobvr = 'n';
+
+ constexpr bool isKLapackType = std::is_same_v<K,double> || std::is_same_v<K,float>;
+ using LapackNumType = std::conditional_t<isKLapackType, K, double>;
+
+ // matrix to put into dgeev
+ LapackNumType matrixVector[dim * dim];
+
+ // copy matrix
+ int row = 0;
+ for(int i=0; i<dim; ++i)
+ {
+ for(int j=0; j<dim; ++j, ++row)
+ {
+ matrixVector[ row ] = matrix[ i ][ j ];
+ }
+ }
+
+ // working memory
+ LapackNumType eigenR[dim];
+ LapackNumType eigenI[dim];
+ LapackNumType work[3*dim];
+
+ // return value information
+ long int info = 0;
+ const long int lwork = 3*dim;
+
+ // call LAPACK routine (see fmatrixev_ext.cc)
+ eigenValuesNonsymLapackCall(&jobvl, &jobvr, &N, &matrixVector[0], &N,
+ &eigenR[0], &eigenI[0], nullptr, &N, nullptr, &N, &work[0],
+ &lwork, &info);
+
+ if( info != 0 )
+ {
+ std::cerr << "For matrix " << matrix << " eigenvalue calculation failed! " << std::endl;
+ DUNE_THROW(InvalidStateException,"eigenValues: Eigenvalue calculation failed!");
+ }
+ for (int i=0; i<N; ++i) {
+ eigenValues[i].real = eigenR[i];
+ eigenValues[i].imag = eigenI[i];
+ }
+ }
+#else
+ DUNE_THROW(NotImplemented,"LAPACK not found!");
+#endif
+ }
+ } // end namespace FMatrixHelp
+
+ /** @} end documentation */
+
+} // end namespace Dune
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_FTRAITS_HH
+#define DUNE_FTRAITS_HH
+
+/** \file
+ * \brief Type traits to determine the type of reals (when working with complex numbers)
+ */
+
+#include <complex>
+#include <vector>
+
+namespace Dune {
+
+ /**
+ @addtogroup DenseMatVec
+ \brief Type traits to retrieve the field and the real type of classes
+
+ Type traits to retrieve the field and the real type of classes
+ e.g. that of FieldVector or FieldMatrix
+ */
+ template<class T>
+ struct FieldTraits
+ {
+ //! export the type representing the field
+ typedef T field_type;
+ //! export the type representing the real type of the field
+ typedef T real_type;
+ };
+
+ template<class T>
+ struct FieldTraits<const T>
+ {
+ typedef typename FieldTraits<T>::field_type field_type;
+ typedef typename FieldTraits<T>::real_type real_type;
+ };
+
+ template<class T>
+ struct FieldTraits< std::complex<T> >
+ {
+ typedef std::complex<T> field_type;
+ typedef T real_type;
+ };
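+
+ // For example: FieldTraits< std::complex<double> >::field_type is
+ // std::complex<double> while ::real_type is double; for a plain double
+ // both typedefs are double.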
+
+ template<class T, unsigned int N>
+ struct FieldTraits< T[N] >
+ {
+ typedef typename FieldTraits<T>::field_type field_type;
+ typedef typename FieldTraits<T>::real_type real_type;
+ };
+
+ template<class T>
+ struct FieldTraits< std::vector<T> >
+ {
+ typedef typename FieldTraits<T>::field_type field_type;
+ typedef typename FieldTraits<T>::real_type real_type;
+ };
+
+} // end namespace Dune
+
+#endif // DUNE_FTRAITS_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_FUNCTION_HH_SILENCE_DEPRECATION
+#warning This file is deprecated after Dune 2.7! Use C++ function objects and std::function instead!
+#else // !DUNE_FUNCTION_HH_SILENCE_DEPRECATION
+#undef DUNE_FUNCTION_HH_SILENCE_DEPRECATION
+#endif // !DUNE_FUNCTION_HH_SILENCE_DEPRECATION
+
+#ifndef DUNE_FUNCTION_HH
+#define DUNE_FUNCTION_HH
+
+#include <utility>
+
+#include <dune/common/deprecated.hh>
+#include "typetraits.hh"
+
+namespace Dune {
+
+ /** @addtogroup Common
+ @{
+ */
+
+ /*! \file
+ \brief Simple base class templates for functions.
+ */
+
+ /**
+ * \brief Base class template for function classes
+ *
+ * \tparam Domain Type of input variable. This could be some 'const T' or 'const T&'.
+ *
+ * \tparam Range Type of output variable. This should be some non-const 'T&' to allow returning results.
+ */
+ template <class Domain, class Range>
+ class
+ [[deprecated("Dune::Function is deprecated after Dune 2.7. Use C++ "
+ "function objects instead!")]]
+ Function
+ {
+ typedef typename std::remove_cv<typename std::remove_reference< Domain >::type >::type RawDomainType;
+ typedef typename std::remove_cv<typename std::remove_reference< Range >::type >::type RawRangeType;
+
+ public:
+
+ //! Raw type of output variable with removed reference and constness
+ typedef RawRangeType RangeType;
+
+ //! Raw type of input variable with removed reference and constness
+ typedef RawDomainType DomainType;
+
+ //! Traits class containing raw types
+ struct Traits
+ {
+ typedef RawDomainType DomainType;
+ typedef RawRangeType RangeType;
+ };
+
+ /**
+ * \brief Function evaluation.
+ *
+ * \param x Argument for function evaluation.
+ * \param y Result of function evaluation.
+ */
+ void evaluate(const typename Traits::DomainType& x, typename Traits::RangeType& y) const;
+ }; // end of Function class
+
+
+
+ DUNE_NO_DEPRECATED_BEGIN
+ /**
+ * \brief Virtual base class template for function classes.
+ *
+ * \see makeVirtualFunction for a helper to convert lambda functions to
+ * `VirtualFunction` objects.
+ *
+ * \tparam DomainType The type of the input variable is 'const DomainType &'
+ *
+ * \tparam RangeType The type of the output variable is 'RangeType &'
+ */
+ template <class DomainType, class RangeType>
+ class
+ [[deprecated("Dune::VirtualFunction is deprecated after Dune 2.7. Use C++ "
+ "function objects and std::function instead!")]]
+ VirtualFunction : public Function<const DomainType&, RangeType&>
+ {
+ public:
+ typedef typename Function<const DomainType&, RangeType&>::Traits Traits;
+
+ virtual ~VirtualFunction() {}
+ /**
+ * \brief Function evaluation.
+ *
+ * \param x Argument for function evaluation.
+ * \param y Result of function evaluation.
+ */
+ virtual void evaluate(const typename Traits::DomainType& x, typename Traits::RangeType& y) const = 0;
+ }; // end of VirtualFunction class
+ DUNE_NO_DEPRECATED_END
+
+ namespace Impl {
+
+ DUNE_NO_DEPRECATED_BEGIN
+ template<typename Domain, typename Range, typename F>
+ class LambdaVirtualFunction final
+ : public VirtualFunction<Domain, Range>
+ {
+ public:
+ LambdaVirtualFunction(F&& f)
+ : f_(std::move(f))
+ {}
+
+ LambdaVirtualFunction(const F& f)
+ : f_(f)
+ {}
+
+ void evaluate(const Domain& x, Range& y) const override
+ {
+ y = f_(x);
+ }
+
+ private:
+ const F f_;
+ };
+ DUNE_NO_DEPRECATED_END
+
+ } /* namespace Impl */
+
+ /**
+ * \brief make `VirtualFunction` out of a function object
+ *
+ * This helper function wraps a function object into a class implementing
+ * the `VirtualFunction` interface. It allows for easy use of lambda
+ * expressions in places that expect a `VirtualFunction`:
+ \code
+ void doSomething(const VirtualFunction<double, double>& f);
+
+ auto f = makeVirtualFunction<double, double>(
+ [](double x) { return x*x; });
+ doSomething(f);
+ \endcode
+ *
+ * \returns object of a class derived from `VirtualFunction<Domain, Range>`
+ *
+ * \tparam Domain domain of the function
+ * \tparam Range range of the function
+ */
+ template<typename Domain, typename Range, typename F>
+ [[deprecated("Dune::LambdaVirtualFunction is deprecated after Dune 2.7. "
+ "Use std::function instead!")]]
+ Impl::LambdaVirtualFunction< Domain, Range, std::decay_t<F> >
+ makeVirtualFunction(F&& f)
+ {
+ return {std::forward<F>(f)};
+ }
+
+ /** @} end documentation */
+
+} // end namespace
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_FVECTOR_HH
+#define DUNE_FVECTOR_HH
+
+#include <array>
+#include <cmath>
+#include <cstddef>
+#include <cstdlib>
+#include <complex>
+#include <cstring>
+#include <utility>
+#include <initializer_list>
+#include <algorithm>
+
+#include "typetraits.hh"
+#include "exceptions.hh"
+
+#include "ftraits.hh"
+#include "densevector.hh"
+#include "boundschecking.hh"
+
+#include <dune/common/math.hh>
+#include <dune/common/promotiontraits.hh>
+
+namespace Dune {
+
+ /** @addtogroup DenseMatVec
+ @{
+ */
+
+ /*! \file
+ * \brief Implements a vector constructed from a given type
+ representing a field and a compile-time given size.
+ */
+
+ template< class K, int SIZE > class FieldVector;
+ template< class K, int SIZE >
+ struct DenseMatVecTraits< FieldVector<K,SIZE> >
+ {
+ typedef FieldVector<K,SIZE> derived_type;
+ typedef std::array<K,SIZE> container_type;
+ typedef K value_type;
+ typedef typename container_type::size_type size_type;
+ };
+
+ template< class K, int SIZE >
+ struct FieldTraits< FieldVector<K,SIZE> >
+ {
+ typedef typename FieldTraits<K>::field_type field_type;
+ typedef typename FieldTraits<K>::real_type real_type;
+ };
+
+ /**
+ * @brief TMP to check the size of a DenseVector statically, if possible.
+ *
+ * If the implementation type of C is a FieldVector, we statically check
+ * whether its dimension is SIZE.
+ * @tparam C The implementation of the other DenseVector
+ * @tparam SIZE The size we need to assume.
+ */
+ template<typename C, int SIZE>
+ struct IsFieldVectorSizeCorrect
+ {
+ enum {
+ /**
+ * @brief True if C is not of type FieldVector or if its
+ * dimension equals SIZE.
+ */
+ value = true
+ };
+ };
+
+ template<typename T, int SIZE>
+ struct IsFieldVectorSizeCorrect<FieldVector<T,SIZE>,SIZE>
+ {
+ enum {value = true};
+ };
+
+ template<typename T, int SIZE, int SIZE1>
+ struct IsFieldVectorSizeCorrect<FieldVector<T,SIZE1>,SIZE>
+ {
+ enum {value = false};
+ };
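+
+ // For example: IsFieldVectorSizeCorrect<FieldVector<double,3>,3>::value is true,
+ // IsFieldVectorSizeCorrect<FieldVector<double,2>,3>::value is false, and any
+ // non-FieldVector type yields true, deferring the size check to run time.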
+
+
+ /** \brief vector space out of a tensor product of fields.
+ *
+ * \tparam K the field type (use float, double, complex, etc)
+ * \tparam SIZE number of components.
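+ *
+ * A small usage sketch (values purely illustrative):
+ * \code
+ * Dune::FieldVector<double,3> v = {1.0, 2.0, 3.0};
+ * auto n = v.two_norm(); // Euclidean norm
+ * v *= 2.0; // scale all components
+ * \endcode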
+ */
+ template< class K, int SIZE >
+ class FieldVector :
+ public DenseVector< FieldVector<K,SIZE> >
+ {
+ std::array<K,SIZE> _data;
+ typedef DenseVector< FieldVector<K,SIZE> > Base;
+ public:
+ //! export size
+ enum {
+ //! The size of this vector.
+ dimension = SIZE
+ };
+
+ typedef typename Base::size_type size_type;
+ typedef typename Base::value_type value_type;
+
+ /** \brief The type used for references to the vector entry */
+ typedef value_type& reference;
+
+ /** \brief The type used for const references to the vector entry */
+ typedef const value_type& const_reference;
+
+ //! Constructor making default-initialized vector
+ constexpr FieldVector()
+ : _data{{}}
+ {}
+
+ //! Constructor making vector with identical coordinates
+ explicit FieldVector (const K& t)
+ {
+ std::fill(_data.begin(),_data.end(),t);
+ }
+
+#if __GNUC__ == 5 && !defined(__clang__)
+ // `... = default;` causes an internal compiler error on GCC 5.4 (Ubuntu 16.04)
+ //! copy constructor
+ FieldVector(const FieldVector& x) : _data(x._data) {}
+#else
+ //! Copy constructor
+ FieldVector (const FieldVector&) = default;
+#endif
+
+ /** \brief Construct from a std::initializer_list */
+ FieldVector (std::initializer_list<K> const &l)
+ {
+ assert(l.size() == dimension);// Actually, this is not needed any more!
+ std::copy_n(l.begin(), std::min(static_cast<std::size_t>(dimension),
+ l.size()),
+ _data.begin());
+ }
+
+ //! copy assignment operator
+ FieldVector& operator= (const FieldVector&) = default;
+
+ template <typename T>
+ FieldVector& operator= (const FieldVector<T, SIZE>& x)
+ {
+ std::copy_n(x.begin(), SIZE, _data.begin());
+ return *this;
+ }
+
+ template<typename T, int N>
+ FieldVector& operator=(const FieldVector<T, N>&) = delete;
+
+ /**
+ * \brief Copy constructor from a second vector of possibly different type
+ *
+ * If the DenseVector type of the this constructor's argument
+ * is implemented by a FieldVector, it is statically checked
+ * if it has the correct size. If this is not the case
+ * the constructor is removed from the overload set using SFINAE.
+ *
+ * \param[in] x A DenseVector with correct size.
+ * \param[in] dummy A void* dummy argument needed by SFINAE.
+ */
+ template<class C>
+ FieldVector (const DenseVector<C> & x,
+ [[maybe_unused]] typename std::enable_if<IsFieldVectorSizeCorrect<C,SIZE>::value>::type* dummy=0)
+ {
+ // do a run-time size check, for the case that x is not a FieldVector
+ assert(x.size() == SIZE); // Actually this is not needed any more!
+ std::copy_n(x.begin(), std::min(static_cast<std::size_t>(SIZE),x.size()), _data.begin());
+ }
+
+ //! Constructor making vector with identical coordinates
+ template<class K1>
+ explicit FieldVector (const FieldVector<K1,SIZE> & x)
+ {
+ std::copy_n(x.begin(), SIZE, _data.begin());
+ }
+
+ template<typename T, int N>
+ explicit FieldVector(const FieldVector<T, N>&) = delete;
+
+ using Base::operator=;
+
+ // make this thing a vector
+ static constexpr size_type size () { return SIZE; }
+
+ K & operator[](size_type i) {
+ DUNE_ASSERT_BOUNDS(i < SIZE);
+ return _data[i];
+ }
+ const K & operator[](size_type i) const {
+ DUNE_ASSERT_BOUNDS(i < SIZE);
+ return _data[i];
+ }
+
+ //! return pointer to underlying array
+ K* data() noexcept
+ {
+ return _data.data();
+ }
+
+ //! return pointer to underlying array
+ const K* data() const noexcept
+ {
+ return _data.data();
+ }
+
+ //! vector space multiplication with scalar
+ template <class Scalar,
+ std::enable_if_t<IsNumber<Scalar>::value, int> = 0>
+ friend auto operator* ( const FieldVector& vector, Scalar scalar)
+ {
+ FieldVector<typename PromotionTraits<value_type,Scalar>::PromotedType,SIZE> result;
+
+ for (size_type i = 0; i < vector.size(); ++i)
+ result[i] = vector[i] * scalar;
+
+ return result;
+ }
+
+ //! vector space multiplication with scalar
+ template <class Scalar,
+ std::enable_if_t<IsNumber<Scalar>::value, int> = 0>
+ friend auto operator* ( Scalar scalar, const FieldVector& vector)
+ {
+ FieldVector<typename PromotionTraits<value_type,Scalar>::PromotedType,SIZE> result;
+
+ for (size_type i = 0; i < vector.size(); ++i)
+ result[i] = scalar * vector[i];
+
+ return result;
+ }
+
+ //! vector space division by scalar
+ template <class Scalar,
+ std::enable_if_t<IsNumber<Scalar>::value, int> = 0>
+ friend auto operator/ ( const FieldVector& vector, Scalar scalar)
+ {
+ FieldVector<typename PromotionTraits<value_type,Scalar>::PromotedType,SIZE> result;
+
+ for (size_type i = 0; i < vector.size(); ++i)
+ result[i] = vector[i] / scalar;
+
+ return result;
+ }
+
+ };
+
+ /** \brief Read a FieldVector from an input stream
+ * \relates FieldVector
+ *
+ * \note This operator is STL compliant, i.e., the content of v is only
+ * changed if the read operation is successful.
+ *
+ * \param[in] in std::istream to read from
+ * \param[out] v FieldVector to be read
+ *
+ * \returns the input stream (in)
+ */
+ template<class K, int SIZE>
+ inline std::istream &operator>> ( std::istream &in,
+ FieldVector<K, SIZE> &v )
+ {
+ FieldVector<K, SIZE> w;
+ for( typename FieldVector<K, SIZE>::size_type i = 0; i < SIZE; ++i )
+ in >> w[ i ];
+ if(in)
+ v = w;
+ return in;
+ }
+
+#ifndef DOXYGEN
+ template< class K >
+ struct DenseMatVecTraits< FieldVector<K,1> >
+ {
+ typedef FieldVector<K,1> derived_type;
+ typedef K container_type;
+ typedef K value_type;
+ typedef size_t size_type;
+ };
+
+ /** \brief Vectors containing only one component
+ */
+ template<class K>
+ class FieldVector<K, 1> :
+ public DenseVector< FieldVector<K,1> >
+ {
+ K _data;
+ typedef DenseVector< FieldVector<K,1> > Base;
+ public:
+ //! export size
+ enum {
+ //! The size of this vector.
+ dimension = 1
+ };
+
+ typedef typename Base::size_type size_type;
+
+ /** \brief The type used for references to the vector entry */
+ typedef K& reference;
+
+ /** \brief The type used for const references to the vector entry */
+ typedef const K& const_reference;
+
+ //===== construction
+
+ /** \brief Default constructor */
+ constexpr FieldVector ()
+ : _data()
+ {}
+
+ /** \brief Constructor with a given scalar */
+ template<typename T,
+ typename EnableIf = typename std::enable_if<
+ std::is_convertible<T, K>::value &&
+ ! std::is_base_of<DenseVector<typename FieldTraits<T>::field_type>, K
+ >::value
+ >::type
+ >
+ FieldVector (const T& k) : _data(k) {}
+
+ //! Constructor from static vector of different type
+ template<class C,
+ std::enable_if_t<
+ std::is_assignable<K&, typename DenseVector<C>::value_type>::value, int> = 0>
+ FieldVector (const DenseVector<C> & x)
+ {
+ static_assert(((bool)IsFieldVectorSizeCorrect<C,1>::value), "FieldVectors do not match in dimension!");
+ assert(x.size() == 1);
+ _data = x[0];
+ }
+
+ //! copy constructor
+ FieldVector(const FieldVector&) = default;
+
+ //! copy assignment operator
+ FieldVector& operator=(const FieldVector&) = default;
+
+ template <typename T>
+ FieldVector& operator= (const FieldVector<T, 1>& other)
+ {
+ _data = other[0];
+ return *this;
+ }
+
+ template<typename T, int N>
+ FieldVector& operator=(const FieldVector<T, N>&) = delete;
+
+ /** \brief Construct from a std::initializer_list */
+ FieldVector (std::initializer_list<K> const &l)
+ {
+ assert(l.size() == 1);
+ _data = *l.begin();
+ }
+
+ //! Assignment operator for scalar
+ template<typename T,
+ typename EnableIf = typename std::enable_if<
+ std::is_assignable<K&, T>::value &&
+ ! std::is_base_of<DenseVector<typename FieldTraits<T>::field_type>, K
+ >::value
+ >::type
+ >
+ inline FieldVector& operator= (const T& k)
+ {
+ _data = k;
+ return *this;
+ }
+
+ //===== forward methods to container
+ static constexpr size_type size () { return 1; }
+
+ K & operator[]([[maybe_unused]] size_type i)
+ {
+ DUNE_ASSERT_BOUNDS(i == 0);
+ return _data;
+ }
+ const K & operator[]([[maybe_unused]] size_type i) const
+ {
+ DUNE_ASSERT_BOUNDS(i == 0);
+ return _data;
+ }
+
+ //! return pointer to underlying array
+ K* data() noexcept
+ {
+ return &_data;
+ }
+
+ //! return pointer to underlying array
+ const K* data() const noexcept
+ {
+ return &_data;
+ }
+
+ //===== conversion operator
+
+ /** \brief Conversion operator */
+ operator K& () { return _data; }
+
+ /** \brief Const conversion operator */
+ operator const K& () const { return _data; }
+ };
+
+ /* ----- FV / FV ----- */
+ /* mostly not necessary as these operations are already covered via the cast operator */
+
+ //! Binary compare, when using FieldVector<K,1> like K
+ template<class K>
+ inline bool operator> (const FieldVector<K,1>& a, const FieldVector<K,1>& b)
+ {
+ return a[0]>b[0];
+ }
+
+ //! Binary compare, when using FieldVector<K,1> like K
+ template<class K>
+ inline bool operator>= (const FieldVector<K,1>& a, const FieldVector<K,1>& b)
+ {
+ return a[0]>=b[0];
+ }
+
+ //! Binary compare, when using FieldVector<K,1> like K
+ template<class K>
+ inline bool operator< (const FieldVector<K,1>& a, const FieldVector<K,1>& b)
+ {
+ return a[0]<b[0];
+ }
+
+ //! Binary compare, when using FieldVector<K,1> like K
+ template<class K>
+ inline bool operator<= (const FieldVector<K,1>& a, const FieldVector<K,1>& b)
+ {
+ return a[0]<=b[0];
+ }
+
+ /* ----- FV / scalar ----- */
+
+ //! Binary addition, when using FieldVector<K,1> like K
+ template<class K>
+ inline FieldVector<K,1> operator+ (const FieldVector<K,1>& a, const K b)
+ {
+ return a[0]+b;
+ }
+
+ //! Binary subtraction, when using FieldVector<K,1> like K
+ template<class K>
+ inline FieldVector<K,1> operator- (const FieldVector<K,1>& a, const K b)
+ {
+ return a[0]-b;
+ }
+
+ //! Binary multiplication, when using FieldVector<K,1> like K
+ template<class K>
+ inline FieldVector<K,1> operator* (const FieldVector<K,1>& a, const K b)
+ {
+ return a[0]*b;
+ }
+
+ //! Binary division, when using FieldVector<K,1> like K
+ template<class K>
+ inline FieldVector<K,1> operator/ (const FieldVector<K,1>& a, const K b)
+ {
+ return a[0]/b;
+ }
+
+ //! Binary compare, when using FieldVector<K,1> like K
+ template<class K>
+ inline bool operator> (const FieldVector<K,1>& a, const K b)
+ {
+ return a[0]>b;
+ }
+
+ //! Binary compare, when using FieldVector<K,1> like K
+ template<class K>
+ inline bool operator>= (const FieldVector<K,1>& a, const K b)
+ {
+ return a[0]>=b;
+ }
+
+ //! Binary compare, when using FieldVector<K,1> like K
+ template<class K>
+ inline bool operator< (const FieldVector<K,1>& a, const K b)
+ {
+ return a[0]<b;
+ }
+
+ //! Binary compare, when using FieldVector<K,1> like K
+ template<class K>
+ inline bool operator<= (const FieldVector<K,1>& a, const K b)
+ {
+ return a[0]<=b;
+ }
+
+ //! Binary compare, when using FieldVector<K,1> like K
+ template<class K>
+ inline bool operator== (const FieldVector<K,1>& a, const K b)
+ {
+ return a[0]==b;
+ }
+
+ //! Binary compare, when using FieldVector<K,1> like K
+ template<class K>
+ inline bool operator!= (const FieldVector<K,1>& a, const K b)
+ {
+ return a[0]!=b;
+ }
+
+ /* ----- scalar / FV ------ */
+
+ //! Binary addition, when using FieldVector<K,1> like K
+ template<class K>
+ inline FieldVector<K,1> operator+ (const K a, const FieldVector<K,1>& b)
+ {
+ return a+b[0];
+ }
+
+ //! Binary subtraction, when using FieldVector<K,1> like K
+ template<class K>
+ inline FieldVector<K,1> operator- (const K a, const FieldVector<K,1>& b)
+ {
+ return a-b[0];
+ }
+
+ //! Binary multiplication, when using FieldVector<K,1> like K
+ template<class K>
+ inline FieldVector<K,1> operator* (const K a, const FieldVector<K,1>& b)
+ {
+ return a*b[0];
+ }
+
+ //! Binary division, when using FieldVector<K,1> like K
+ template<class K>
+ inline FieldVector<K,1> operator/ (const K a, const FieldVector<K,1>& b)
+ {
+ return a/b[0];
+ }
+
+ //! Binary compare, when using FieldVector<K,1> like K
+ template<class K>
+ inline bool operator> (const K a, const FieldVector<K,1>& b)
+ {
+ return a>b[0];
+ }
+
+ //! Binary compare, when using FieldVector<K,1> like K
+ template<class K>
+ inline bool operator>= (const K a, const FieldVector<K,1>& b)
+ {
+ return a>=b[0];
+ }
+
+ //! Binary compare, when using FieldVector<K,1> like K
+ template<class K>
+ inline bool operator< (const K a, const FieldVector<K,1>& b)
+ {
+ return a<b[0];
+ }
+
+ //! Binary compare, when using FieldVector<K,1> like K
+ template<class K>
+ inline bool operator<= (const K a, const FieldVector<K,1>& b)
+ {
+ return a<=b[0];
+ }
+
+ //! Binary compare, when using FieldVector<K,1> like K
+ template<class K>
+ inline bool operator== (const K a, const FieldVector<K,1>& b)
+ {
+ return a==b[0];
+ }
+
+ //! Binary compare, when using FieldVector<K,1> like K
+ template<class K>
+ inline bool operator!= (const K a, const FieldVector<K,1>& b)
+ {
+ return a!=b[0];
+ }
+#endif
+
+ /* Overloads for common classification functions */
+ namespace MathOverloads {
+
+ //! Returns whether all entries are finite
+ template<class K, int SIZE>
+ auto isFinite(const FieldVector<K,SIZE> &b, PriorityTag<2>, ADLTag) {
+ bool out = true;
+ for(int i=0; i<SIZE; i++) {
+ out &= Dune::isFinite(b[i]);
+ }
+ return out;
+ }
+
+ //! Returns whether any entry is infinite
+ template<class K, int SIZE>
+ bool isInf(const FieldVector<K,SIZE> &b, PriorityTag<2>, ADLTag) {
+ bool out = false;
+ for(int i=0; i<SIZE; i++) {
+ out |= Dune::isInf(b[i]);
+ }
+ return out;
+ }
+
+ //! Returns whether any entry is NaN
+ template<class K, int SIZE, typename = std::enable_if_t<HasNaN<K>::value>>
+ bool isNaN(const FieldVector<K,SIZE> &b, PriorityTag<2>, ADLTag) {
+ bool out = false;
+ for(int i=0; i<SIZE; i++) {
+ out |= Dune::isNaN(b[i]);
+ }
+ return out;
+ }
+
+ //! Returns true if either b or c is NaN
+ template<class K, typename = std::enable_if_t<HasNaN<K>::value>>
+ bool isUnordered(const FieldVector<K,1> &b, const FieldVector<K,1> &c,
+ PriorityTag<2>, ADLTag) {
+ return Dune::isUnordered(b[0],c[0]);
+ }
+ } //MathOverloads
+
+ /** @} end documentation */
+
+} // end namespace
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_GCD_HH
+#define DUNE_GCD_HH
+#warning "This header is deprecated and will be removed after Dune release 2.8. Use std::gcd instead"
+
+#include <numeric>
+
+namespace Dune
+{
+ /**
+ * @brief Calculator of the greatest common divisor.
+ */
+ template<long a, long b>
+ struct [[deprecated("Will be removed after Dune 2.8. Use std::gcd from <numeric> instead!")]] Gcd
+ {
+ /**
+ * @brief The greatest common divisor of a and b. */
+ constexpr static long value = std::gcd(a,b);
+ };
+
+ /**
+ * @}
+ */
+}
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_GENERICITERATOR_HH
+#define DUNE_GENERICITERATOR_HH
+
+#include <dune/common/iteratorfacades.hh>
+#include <cassert>
+
+namespace Dune {
+
+ /*! \defgroup GenericIterator GenericIterator
+ \ingroup IteratorFacades
+
+ \brief Generic Iterator class for writing stl conformant iterators
+ for any container class with operator[]
+
+ Using this template class you can create an iterator and a const_iterator
+ for any container class.
+
+ Imagine you have SimpleContainer and would like to have an iterator.
+ All you have to do is provide operator[], begin() and end()
+ (for const and for non-const).
+
+ \code
+ template<class T>
+ class SimpleContainer{
+ public:
+ typedef GenericIterator<SimpleContainer<T>,T> iterator;
+
+ typedef GenericIterator<const SimpleContainer<T>,const T> const_iterator;
+
+ SimpleContainer(){
+ for(int i=0; i < 100; i++)
+ values_[i]=i;
+ }
+
+ iterator begin(){
+ return iterator(*this, 0);
+ }
+
+ const_iterator begin() const{
+ return const_iterator(*this, 0);
+ }
+
+ iterator end(){
+ return iterator(*this, 100);
+ }
+
+ const_iterator end() const{
+ return const_iterator(*this, 100);
+ }
+
+ T& operator[](int i){
+ return values_[i];
+ }
+
+ const T& operator[](int i) const{
+ return values_[i];
+ }
+ private:
+ T values_[100];
+ };
+ \endcode
+
+ See dune/common/test/iteratorfacadetest.hh for details or
+ Dune::QuadratureDefault in dune/quadrature/quadrature.hh
+ for a real example.
+ */
+
+ /**
+ * @file
+ * @brief Implements a generic iterator class for writing stl conformant iterators.
+ *
+ * Using this generic iterator writing iterators for containers
+ * that implement operator[] is only a matter of seconds.
+ */
+
+ /**
+ \brief Get the 'const' version of a reference to a mutable object
+
+ Given a reference R=T&, const_reference<R>::type gives you the typedef for const T&
+ */
+ template<class R>
+ struct const_reference
+ {
+ typedef const R type;
+ };
+
+ template<class R>
+ struct const_reference<const R>
+ {
+ typedef const R type;
+ };
+
+ template<class R>
+ struct const_reference<R&>
+ {
+ typedef const R& type;
+ };
+
+ template<class R>
+ struct const_reference<const R&>
+ {
+ typedef const R& type;
+ };
+
+ /**
+ \brief Get the 'mutable' version of a reference to a const object
+
+ Given a const reference R=const T&, mutable_reference<R>::type gives you the typedef for T&
+ */
+ template<class R>
+ struct mutable_reference
+ {
+ typedef R type;
+ };
+
+ template<class R>
+ struct mutable_reference<const R>
+ {
+ typedef R type;
+ };
+
+ template<class R>
+ struct mutable_reference<R&>
+ {
+ typedef R& type;
+ };
+
+ template<class R>
+ struct mutable_reference<const R&>
+ {
+ typedef R& type;
+ };
+
+ /** @addtogroup GenericIterator
+ *
+ * @{
+ */
+
+ /**
+ * @brief Generic class for stl-conforming iterators for container classes with operator[].
+ *
+ * If template parameter C has a const qualifier we are a const iterator, otherwise we
+ * are a mutable iterator.
+ */
+ template<class C, class T, class R=T&, class D = std::ptrdiff_t,
+ template<class,class,class,class> class IteratorFacade=RandomAccessIteratorFacade>
+ class GenericIterator :
+ public IteratorFacade<GenericIterator<C,T,R,D,IteratorFacade>,T,R,D>
+ {
+ friend class GenericIterator<typename std::remove_const<C>::type, typename std::remove_const<T>::type, typename mutable_reference<R>::type, D, IteratorFacade>;
+ friend class GenericIterator<const typename std::remove_const<C>::type, const typename std::remove_const<T>::type, typename const_reference<R>::type, D, IteratorFacade>;
+
+ typedef GenericIterator<typename std::remove_const<C>::type, typename std::remove_const<T>::type, typename mutable_reference<R>::type, D, IteratorFacade> MutableIterator;
+ typedef GenericIterator<const typename std::remove_const<C>::type, const typename std::remove_const<T>::type, typename const_reference<R>::type, D, IteratorFacade> ConstIterator;
+
+ public:
+
+ /**
+ * @brief The type of container we are an iterator for.
+ *
+ * The container type must provide an operator[] method.
+ *
+ * If C has a const qualifier we are a const iterator, otherwise we
+ * are a mutable iterator.
+ */
+ typedef C Container;
+
+ /**
+ * @brief The value type of the iterator.
+ *
+ * This is the return type when dereferencing the iterator.
+ */
+ typedef T Value;
+
+ /**
+ * @brief The type of the difference between two positions.
+ */
+ typedef D DifferenceType;
+
+ /**
+ * @brief The type of the reference to the values accessed.
+ */
+ typedef R Reference;
+
+ // Constructors needed by the base iterators
+ GenericIterator() : container_(0), position_(0)
+ {}
+
+ /**
+ * @brief Constructor
+ * @param cont Reference to the container we are an iterator for
+ * @param pos The position the iterator will be positioned to
+ * (e.g. 0 for an iterator returned by Container::begin() or
+ * the size of the container for an iterator returned by Container::end())
+ */
+ GenericIterator(Container& cont, DifferenceType pos)
+ : container_(&cont), position_(pos)
+ {}
+
+ /**
+ * @brief Copy constructor
+ *
+ * This may be hard to grasp at first, so consider the two cases:
+ * 1. If this is a mutable iterator, this is the only valid copy constructor, as the argument is a mutable iterator.
+ * 2. If this is a const iterator, the argument is a mutable iterator, so this is the conversion needed to initialize a const iterator from a mutable one.
+ */
+ GenericIterator(const MutableIterator& other) : container_(other.container_), position_(other.position_)
+ {}
+
+ /**
+ * @brief Copy constructor
+ *
+ * @warning Calling this method results in a compiler error, if this is a mutable iterator.
+ *
+ * This may be hard to grasp at first, so consider the two cases:
+ * 1. If this is a mutable iterator, the argument is a const iterator; calling this constructor is then a mistake in the user's code and results in a (probably hard to decipher) compiler error.
+ * 2. If this is a const iterator, this is the usual copy constructor, as the argument is a const iterator too.
+ */
+ GenericIterator(const ConstIterator& other) : container_(other.container_), position_(other.position_)
+ {}
+
+ // Methods needed by the forward iterator
+ bool equals(const MutableIterator & other) const
+ {
+ return position_ == other.position_ && container_ == other.container_;
+ }
+
+ bool equals(const ConstIterator & other) const
+ {
+ return position_ == other.position_ && container_ == other.container_;
+ }
+
+ Reference dereference() const {
+ return container_->operator[](position_);
+ }
+
+ void increment(){
+ ++position_;
+ }
+
+ // Additional function needed by BidirectionalIterator
+ void decrement(){
+ --position_;
+ }
+
+ // Additional function needed by RandomAccessIterator
+ Reference elementAt(DifferenceType i) const {
+ return container_->operator[](position_+i);
+ }
+
+ void advance(DifferenceType n){
+ position_=position_+n;
+ }
+
+ DifferenceType distanceTo(const MutableIterator& other) const
+ {
+ assert(other.container_==container_);
+ return other.position_ - position_;
+ }
+
+ DifferenceType distanceTo(const ConstIterator& other) const
+ {
+ assert(other.container_==container_);
+ return other.position_ - position_;
+ }
+
+ private:
+ Container *container_;
+ DifferenceType position_;
+ };
+
+ /** @} */
+
+} // end namespace Dune
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_GMPFIELD_HH
+#define DUNE_GMPFIELD_HH
+
+/** \file
+ * \brief Wrapper for the GNU multiprecision (GMP) library
+ */
+
+#include <iostream>
+#include <string>
+#include <type_traits>
+
+#if HAVE_GMP || DOXYGEN
+
+#include <gmpxx.h>
+
+#include <dune/common/promotiontraits.hh>
+#include <dune/common/typetraits.hh>
+
+namespace Dune
+{
+
+ /**
+ * \ingroup Numbers
+ * \brief Number class for high-precision floating-point numbers based on the GMP library's mpf_class implementation
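+ *
+ * A short usage sketch (illustrative only):
+ * \code
+ * Dune::GMPField<128> a("1.3");   // initialize from a string to keep the full precision
+ * Dune::GMPField<128> b(2.0);     // initialize from a compatible scalar
+ * Dune::GMPField<128> c = a + b;  // arithmetic is inherited from mpf_class
+ * double d = c;                   // convert back to double
+ * \endcode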
+ */
+ template< unsigned int precision >
+ class GMPField
+ : public mpf_class
+ {
+ typedef mpf_class Base;
+
+ public:
+ /** default constructor, initialize to zero */
+ GMPField ()
+ : Base(0,precision)
+ {}
+
+ /** \brief initialize from a string
+ \note this is the only reliable way to initialize with higher precision values
+ */
+ GMPField ( const char* str )
+ : Base(str,precision)
+ {}
+
+ /** \brief initialize from a string
+ \note this is the only reliable way to initialize with higher precision values
+ */
+ GMPField ( const std::string& str )
+ : Base(str,precision)
+ {}
+
+ /** \brief initialize from a compatible scalar type
+ */
+ template< class T,
+ typename EnableIf = typename std::enable_if<
+ std::is_convertible<T, mpf_class>::value>::type
+ >
+ GMPField ( const T &v )
+ : Base( v,precision )
+ {}
+
+ // type conversion operators
+ operator double () const
+ {
+ return this->get_d();
+ }
+
+ };
+
+ template <unsigned int precision>
+ struct IsNumber<GMPField<precision>>
+ : public std::integral_constant<bool, true> {
+ };
+
+ template< unsigned int precision1, unsigned int precision2 >
+ struct PromotionTraits<GMPField<precision1>, GMPField<precision2>>
+ {
+ typedef GMPField<(precision1 > precision2 ? precision1 : precision2)> PromotedType;
+ };
+
+ template< unsigned int precision >
+ struct PromotionTraits<GMPField<precision>,GMPField<precision>>
+ {
+ typedef GMPField<precision> PromotedType;
+ };
+
+ template< unsigned int precision, class T >
+ struct PromotionTraits<GMPField<precision>, T>
+ {
+ typedef GMPField<precision> PromotedType;
+ };
+
+ template< class T, unsigned int precision >
+ struct PromotionTraits<T, GMPField<precision>>
+ {
+ typedef GMPField<precision> PromotedType;
+ };
+}
+
+#endif // HAVE_GMP
+
+#endif // #ifndef DUNE_GMPFIELD_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_HASH_HH
+#define DUNE_COMMON_HASH_HH
+
+#include <functional>
+
+#include <dune/common/typetraits.hh>
+
+/**
+ * \file
+ * \brief Support for calculating hash values of objects.
+ *
+ * This file provides the functor Dune::hash to calculate hash values and
+ * some infrastructure to simplify extending Dune::hash for user-defined types,
+ * independent of the actual underlying implementation.
+ *
+ */
+
+
+
+// ********************************************************************************
+// Doxygen documentation
+// ********************************************************************************
+
+#ifdef DOXYGEN
+
+namespace Dune {
+
+ //! Functor for hashing objects of type T.
+ /**
+ * The interface outlined below is compatible with std::hash, std::tr1::hash and
+ * boost::hash, so it is possible to use Dune::hash in associative containers from
+ * those libraries.
+ */
+ template<typename T>
+ struct hash
+ {
+
+ //! Calculates the hash of t.
+ std::size_t operator()(const T& t) const
+ {
+ return hash(t);
+ }
+
+ };
+
+}
+
+//! Defines the required struct specialization to make type hashable via `Dune::hash`.
+/**
+ * In order to calculate the hash, operator() of the generated specialization will
+ * return the result of an unqualified call to the global function `hash_value(const type&)`.
+ * As the call is not qualified, the function will be found using argument-dependent lookup,
+ * allowing implementors to conveniently place it inside the class body.
+ *
+ * Consider the following type:
+ *
+ * \code
+ * namespace ns {
+ * template<typename A, int i>
+ * class Foo
+ * {
+ * ...
+ * };
+ * }
+ * \endcode
+ *
+ * In order to add support for `Dune::hash`, you need to extend the definition like this:
+ *
+ * \code
+ * namespace ns {
+ * template<typename A, int i>
+ * class Foo
+ * {
+ * ...
+ * // The keyword "friend" turns this into a global function that is a friend of Foo.
+ * inline friend std::size_t hash_value(const Foo& arg)
+ * {
+ * return ...;
+ * }
+ * };
+ * }
+ *
+ * // Define hash struct specialization
+ * DUNE_DEFINE_HASH(DUNE_HASH_TEMPLATE_ARGS(typename A, int i),DUNE_HASH_TYPE(Foo<A,i>))
+ * \endcode
+ *
+ * \warning
+ * As the specialization has to be placed in the original namespace of the
+ * `hash` struct (e.g. `std`), this macro *must* be called from the global namespace!
+ *
+ * \param template_args The template arguments required by the hash struct specialization,
+ * wrapped in a call to DUNE_HASH_TEMPLATE_ARGS. If this is a complete
+ * specialization, call DUNE_HASH_TEMPLATE_ARGS without arguments.
+ * \param type The exact type of the specialization, wrapped in a call to DUNE_HASH_TYPE.
+ */
+#define DUNE_DEFINE_HASH(template_args,type)
+
+
+//! Wrapper macro for the template arguments in DUNE_DEFINE_HASH.
+/**
+ * This macro should always be used as a wrapper for the template arguments when calling DUNE_DEFINE_HASH.
+ * It works around some preprocessor limitations when the template arguments contain commas or the list
+ * is completely empty.
+ */
+#define DUNE_HASH_TEMPLATE_ARGS(...)
+
+//! Wrapper macro for the type to be hashed in DUNE_DEFINE_HASH.
+/**
+ * This macro should always be used as a wrapper for the type of the specialization when calling
+ * DUNE_DEFINE_HASH.
+ * It works around some preprocessor limitations when the type contains commas.
+ */
+#define DUNE_HASH_TYPE(...)
+
+#else // DOXYGEN - hide all the ugly implementation
+
+
+
+// ********************************************************************************
+// C++11 support
+// ********************************************************************************
+
+// import std::hash into Dune namespace
+namespace Dune {
+
+ using std::hash;
+
+}
+
+// Macro for defining a std::hash specialization for type.
+// This should not be called directly. Call DUNE_DEFINE_HASH
+// instead.
+#define DUNE_DEFINE_STD_HASH(template_args,type) \
+ namespace std { \
+ \
+ template<template_args> \
+ struct hash<type> \
+ { \
+ \
+ typedef type argument_type; \
+ typedef std::size_t result_type; \
+ \
+ std::size_t operator()(const type& arg) const \
+ { \
+ return hash_value(arg); \
+ } \
+ }; \
+ \
+ template<template_args> \
+ struct hash<const type> \
+ { \
+ \
+ typedef type argument_type; \
+ typedef std::size_t result_type; \
+ \
+ std::size_t operator()(const type& arg) const \
+ { \
+ return hash_value(arg); \
+ } \
+ }; \
+ \
+ } \
+
+// Wrapper macro for template arguments.
+// This is required because the template arguments can contain commas,
+// which will create a macro argument list of unknown length. That in itself
+// would not be a problem, but DUNE_DEFINE_HASH has to be called with two argument
+// lists of unknown length. So this macro wraps its arguments with parentheses,
+// turning it into a single argument. The result is used as the parameter list of
+// an expansion macro in the calls to the implementation-specific macros
+// for C++11 and TR1. Note that technically, this trick is only legal for C++11,
+// but pretty much every compiler supports variadic macros in C++03 mode, as they
+// are part of C99.
+#define DUNE_HASH_TEMPLATE_ARGS(...) (__VA_ARGS__)
+
+// Wrapper macro for type to be hashed.
+// See above for rationale.
+#define DUNE_HASH_TYPE(...) (__VA_ARGS__)
+
+// Expansion macro for the parenthesed argument lists created by
+// DUNE_HASH_TEMPLATE_ARGS and DUNE_HASH_TYPE.
+#define DUNE_HASH_EXPAND_VA_ARGS(...) __VA_ARGS__
+
+// Define specializations for all discovered hash implementations.
+#define DUNE_DEFINE_HASH(template_args,type) \
+ DUNE_DEFINE_STD_HASH(DUNE_HASH_EXPAND_VA_ARGS template_args, DUNE_HASH_EXPAND_VA_ARGS type) \
+
+
+#endif // DOXYGEN
+
+
+
+// ********************************************************************************
+// Some utility functions for combining hashes of member variables.
+// ********************************************************************************
+
+namespace Dune {
+
+ // The following functions are an implementation of the proposed hash extensions for
+ // the C++ standard by Peter Dimov
+ // (cf. http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2005/n1756.pdf, issue 6.18).
+ // They are also contained in the boost::functional::hash library by Daniel James, but
+ // that implementation uses boost::hash internally, while we want to use Dune::hash. They
+ // are also considered for inclusion in TR2 (then based on std::hash, of course).
+
+#ifndef DOXYGEN
+
+ // helper struct for providing different hash combining algorithms dependent on
+ // the size of size_t.
+ // hash_combiner has to be specialized for the size (in bytes) of std::size_t.
+ // Specialized versions should provide a method
+ //
+ // template <typename typeof_size_t, typename T>
+ // void operator()(typeof_size_t& seed, const T& arg) const;
+ //
+ // that will be called by the interface function hash_combine() described further below.
+ // The redundant template parameter typeof_size_t is needed to avoid warnings for the
+ // unused 64-bit specialization on 32-bit systems.
+ //
+ // There is no default implementation!
+ template<int sizeof_size_t>
+ struct hash_combiner;
+
+
+ // hash combining for 64-bit platforms.
+ template<>
+ struct hash_combiner<8>
+ {
+
+ template<typename typeof_size_t, typename T>
+ void operator()(typeof_size_t& seed, const T& arg) const
+ {
+ static_assert(sizeof(typeof_size_t)==8, "hash_combiner::operator() instantiated with nonmatching type and size");
+
+ // The following algorithm for combining two 64-bit hash values is inspired by a similar
+ // function in CityHash (http://cityhash.googlecode.com/svn-history/r2/trunk/src/city.h),
+ // which is in turn based on ideas from the MurmurHash library. The basic idea is easy to
+ // grasp, though: New information is XORed into the existing hash multiple times at different
+ // places (using shift operations), and the resulting pattern is spread over the complete
+ // range of available bits via multiplication with a "magic" constant. The constants used
+ // below (47 and 0x9ddfea08eb382d69ULL) are taken from the CityHash implementation.
+ //
+ // We opted not to use the mixing algorithm proposed in the C++ working group defect list at
+ // http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2005/n1756.pdf, p. 57f. because it
+ // has very bad hash distribution properties if you apply it to lists of very small numbers,
+ // an application that is frequent in PDELab's ordering framework.
+
+ Dune::hash<T> hasher;
+ const typeof_size_t kMul = 0x9ddfea08eb382d69ULL;
+ typeof_size_t h = hasher(arg);
+ typeof_size_t a = (seed ^ h) * kMul;
+ a ^= (a >> 47);
+ typeof_size_t b = (h ^ a) * kMul;
+ b ^= (b >> 47);
+ b *= kMul;
+ seed = b;
+ }
+
+ };
+
+
+ // hash combining for 32-bit platforms.
+ template<>
+ struct hash_combiner<4>
+ {
+
+ template<typename typeof_size_t, typename T>
+ void operator()(typeof_size_t& seed, const T& arg) const
+ {
+ static_assert(sizeof(typeof_size_t)==4, "hash_combiner::operator() instantiated with nonmatching type and size");
+
+ // The default algorithm above requires a 64-bit std::size_t. The following algorithm is a
+ // 32-bit compatible fallback, again inspired by CityHash and MurmurHash
+ // (http://cityhash.googlecode.com/svn-history/r2/trunk/src/city.cc).
+ // It uses 32-bit constants and relies on rotation instead of multiplication to spread the
+ // mixed bits as that is apparently more efficient on IA-32. The constants used below are again
+ // taken from CityHash, in particular from the file referenced above.
+
+ Dune::hash<T> hasher;
+ const typeof_size_t c1 = 0xcc9e2d51;
+ const typeof_size_t c2 = 0x1b873593;
+ const typeof_size_t c3 = 0xe6546b64;
+ typeof_size_t h = hasher(arg);
+ typeof_size_t a = seed * c1;
+ a = (a >> 17) | (a << (32 - 17));
+ a *= c2;
+ h ^= a;
+ h = (h >> 19) | (h << (32 - 19));
+ seed = h * 5 + c3;
+ }
+
+ };
+
+#endif // DOXYGEN
+
+ //! Calculates the hash value of arg and combines it in-place with seed.
+ /**
+ *
+ * \param seed The hash value that will be combined with the hash of arg.
+ * \param arg The object for which to calculate a hash value and combine it with seed.
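+ *
+ * A typical use is hashing a composite object member by member (sketch only; Point is a made-up type):
+ * \code
+ * struct Point { int x; int y; };
+ *
+ * std::size_t hash_value(const Point& p)
+ * {
+ *   std::size_t seed = 0;
+ *   Dune::hash_combine(seed, p.x);
+ *   Dune::hash_combine(seed, p.y);
+ *   return seed;
+ * }
+ * \endcode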
+ */
+ template<typename T>
+ inline void hash_combine(std::size_t& seed, const T& arg)
+ {
+ hash_combiner<sizeof(std::size_t)>()(seed,arg);
+ }
+
+ //! Hashes all elements in the range [first,last) and returns the combined hash.
+ /**
+ *
+ * \param first Iterator pointing to the first object to hash.
+ * \param last Iterator pointing one past the last object to hash.
+
+ * \returns The result of hashing all objects in the range and combining them
+ * using hash_combine() in sequential fashion, starting with seed 0.
+ */
+ template<typename It>
+ inline std::size_t hash_range(It first, It last)
+ {
+ std::size_t seed = 0;
+ for (; first != last; ++first)
+ {
+ hash_combine(seed,*first);
+ }
+ return seed;
+ }
+
+ //! Hashes all elements in the range [first,last) and combines the hashes in-place with seed.
+ /**
+ *
+ * \param seed Start value that will be combined with the hash values of all objects in
+ * the range using hash_combine() in sequential fashion.
+ * \param first Iterator pointing to the first object to hash.
+ * \param last Iterator pointing one past the last object to hash.
+ */
+ template<typename It>
+ inline void hash_range(std::size_t& seed, It first, It last)
+ {
+ for (; first != last; ++first)
+ {
+ hash_combine(seed,*first);
+ }
+ }
+
+} // end namespace Dune
+
+#endif // DUNE_COMMON_HASH_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_HYBRIDUTILITIES_HH
+#define DUNE_COMMON_HYBRIDUTILITIES_HH
+
+#include <tuple>
+#include <utility>
+
+#include <dune/common/typetraits.hh>
+#include <dune/common/typeutilities.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/indices.hh>
+#include <dune/common/assertandreturn.hh>
+#include <dune/common/rangeutilities.hh>
+
+
+
+namespace Dune {
+namespace Hybrid {
+
+namespace Impl {
+
+ // Special case: the size of a Dune::FieldVector is given by its template parameter
+ template<class T, int i>
+ constexpr auto size(const Dune::FieldVector<T, i>&, const PriorityTag<5>&)
+ -> decltype(std::integral_constant<std::size_t,i>())
+ {
+ return {};
+ }
+
+ // Try if tuple_size is implemented for class
+ template<class T>
+ constexpr auto size(const T&, const PriorityTag<3>&)
+ -> decltype(std::integral_constant<std::size_t,std::tuple_size<T>::value>())
+ {
+ return {};
+ }
+
+ // Try if there's a static constexpr size()
+ template<class T>
+ constexpr auto size(const T&, const PriorityTag<1>&)
+ -> decltype(std::integral_constant<std::size_t,T::size()>())
+ {
+ return {};
+ }
+
+ // As a last resort, fall back to the dynamic t.size()
+ template<class T>
+ constexpr auto size(const T& t, const PriorityTag<0>&)
+ {
+ return t.size();
+ }
+
+} // namespace Impl
+
+
+
+/**
+ * \brief Size query
+ *
+ * \ingroup HybridUtilities
+ *
+ * \tparam T Type of container whose size is queried
+ *
+ * \param t Container whose size is queried
+ *
+ * \return Size of t
+ *
+ * If the size of t is known at compile time the size is
+ * returned as std::integral_constant<std::size_t, size>.
+ * Otherwise the result of t.size() is returned.
+ *
+ * Supported types for deriving the size at compile time are:
+ * * instances of std::integer_sequence
+ * * all types std::tuple_size is implemented for
+ * * all types that have a static method ::size()
+ * * instances of Dune::FieldVector
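+ *
+ * For illustration (sketch, assuming the relevant standard headers are included):
+ * \code
+ * auto t  = std::make_tuple(1, 2.0);
+ * auto n1 = Dune::Hybrid::size(t);   // std::integral_constant<std::size_t,2>
+ * std::vector<int> v{1, 2, 3};
+ * auto n2 = Dune::Hybrid::size(v);   // plain std::size_t with value 3
+ * \endcode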
+ */
+template<class T>
+constexpr auto size(const T& t)
+{
+ return Impl::size(t, PriorityTag<42>());
+}
+
+
+
+namespace Impl {
+
+ template<class Container, class Index,
+ std::enable_if_t<IsTuple<std::decay_t<Container>>::value, int> = 0>
+ constexpr decltype(auto) elementAt(Container&& c, Index&&, PriorityTag<2>)
+ {
+ return std::get<std::decay_t<Index>::value>(c);
+ }
+
+ template<class T, T... t, class Index>
+ constexpr decltype(auto) elementAt(std::integer_sequence<T, t...> c, Index, PriorityTag<1>)
+ {
+ return Dune::integerSequenceEntry(c, std::integral_constant<std::size_t, Index::value>());
+ }
+
+ template<class Container, class Index>
+ constexpr decltype(auto) elementAt(Container&& c, Index&& i, PriorityTag<0>)
+ {
+ return c[i];
+ }
+
+} // namespace Impl
+
+
+
+/**
+ * \brief Get element at given position from container
+ *
+ * \ingroup HybridUtilities
+ *
+ * \tparam Container Type of given container
+ * \tparam Index Type of index
+ *
+ * \param c Given container
+ * \param i Index of element to obtain
+ *
+ * \return The element at position i, i.e. c[i]
+ *
+ * This returns the i-th entry of c. It supports the following
+ * containers:
+ * * Containers providing dynamic access via operator[]
+ * * Heterogeneous containers providing access via operator[](integral_constant<...>)
+ * * std::tuple<...>
+ * * std::integer_sequence
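+ *
+ * For illustration (sketch only):
+ * \code
+ * using namespace Dune::Indices;
+ * auto t = std::make_tuple(42, 'a');
+ * auto a = Dune::Hybrid::elementAt(t, _0);   // 42, selected via std::get at compile time
+ * std::vector<int> v{1, 2, 3};
+ * auto b = Dune::Hybrid::elementAt(v, 1);    // 2, plain run-time operator[]
+ * \endcode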
+ */
+template<class Container, class Index>
+constexpr decltype(auto) elementAt(Container&& c, Index&& i)
+{
+ return Impl::elementAt(std::forward<Container>(c), std::forward<Index>(i), PriorityTag<42>());
+}
+
+
+
+namespace Impl {
+
+ template<class Begin, class End,
+ std::enable_if_t<IsIntegralConstant<Begin>::value and IsIntegralConstant<End>::value, int> = 0>
+ constexpr auto integralRange(const Begin& /*begin*/, const End& /*end*/, const PriorityTag<1>&)
+ {
+ static_assert(Begin::value <= End::value, "You cannot create an integralRange where end<begin");
+ return Dune::StaticIntegralRange<std::size_t, End::value, Begin::value>();
+ }
+
+ // This should be constexpr but gcc-4.9 does not support
+ // the relaxed constexpr requirements. Hence for being
+ // constexpr the function body can only contain a return
+ // statement and no assertion before this.
+ template<class Begin, class End>
+ constexpr auto integralRange(const Begin& begin, const End& end, const PriorityTag<0>&)
+ {
+ return DUNE_ASSERT_AND_RETURN(begin<=end, Dune::IntegralRange<End>(begin, end));
+ }
+
+} // namespace Impl
+
+
+
+/**
+ * \brief Create an integral range
+ *
+ * \ingroup HybridUtilities
+ *
+ * \tparam Begin Type of begin entry of the range
+ * \tparam End Type of end entry of the range
+ *
+ * \param begin First entry of the range
+ * \param end One past the last entry of the range
+ *
+ * \returns An object encoding the given range
+ *
+ * If Begin and End are both instances of type
+ * std::integral_constant, the returned range
+ * encodes begin and end statically.
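+ *
+ * For illustration (sketch only):
+ * \code
+ * using namespace Dune::Indices;
+ * auto staticRange  = Dune::Hybrid::integralRange(_0, _3);  // bounds known at compile time
+ * auto dynamicRange = Dune::Hybrid::integralRange(0, 10);   // bounds only known at run time
+ * \endcode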
+ */
+template<class Begin, class End>
+constexpr auto integralRange(const Begin& begin, const End& end)
+{
+ return Impl::integralRange(begin, end, PriorityTag<42>());
+}
+
+/**
+ * \brief Create an integral range starting from 0
+ *
+ * \ingroup HybridUtilities
+ *
+ * \tparam End Type of end entry of the range
+ *
+ * \param end One past the last entry of the range
+ *
+ * \returns An object encoding the given range
+ *
+ * This is a shortcut for integralRange(_0, end).
+ */
+template<class End>
+constexpr auto integralRange(const End& end)
+{
+ return Impl::integralRange(Dune::Indices::_0, end, PriorityTag<42>());
+}
+
+
+
+namespace Impl {
+
+ template<class T>
+ constexpr void evaluateFoldExpression(std::initializer_list<T>&&)
+ {}
+
+ template<class Range, class F, class Index, Index... i>
+ constexpr void forEachIndex(Range&& range, F&& f, std::integer_sequence<Index, i...>)
+ {
+ evaluateFoldExpression<int>({(f(Hybrid::elementAt(range, std::integral_constant<Index,i>())), 0)...});
+ }
+
+ template<class F, class Index, Index... i>
+ constexpr void forEach(std::integer_sequence<Index, i...> /*range*/, F&& f, PriorityTag<2>)
+ {
+ evaluateFoldExpression<int>({(f(std::integral_constant<Index,i>()), 0)...});
+ }
+
+
+ template<class Range, class F,
+ std::enable_if_t<IsIntegralConstant<decltype(Hybrid::size(std::declval<Range>()))>::value, int> = 0>
+ constexpr void forEach(Range&& range, F&& f, PriorityTag<1>)
+ {
+ auto size = Hybrid::size(range);
+ auto indices = std::make_index_sequence<size>();
+ (forEachIndex)(std::forward<Range>(range), std::forward<F>(f), indices);
+ }
+
+ template<class Range, class F>
+ constexpr void forEach(Range&& range, F&& f, PriorityTag<0>)
+ {
+ for(auto&& e : range)
+ f(e);
+ }
+
+} // namespace Impl
+
+
+
+/**
+ * \brief Range based for loop
+ *
+ * \ingroup HybridUtilities
+ *
+ * \tparam Range Type of given range
+ * \tparam F Type of given predicate
+ *
+ * \param range The range to loop over
+ * \param f A predicate that will be called with each entry of the range
+ *
+ * This supports looping over the following ranges
+ * * ranges obtained from integralRange()
+ * * all ranges that provide Hybrid::size() and Hybrid::elementAt()
+ *
+ * This especially includes instances of std::integer_sequence,
+ * std::tuple, Dune::TupleVector, and Dune::MultiTypeBlockVector.
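+ *
+ * For illustration (sketch, assuming <iostream> is included):
+ * \code
+ * Dune::Hybrid::forEach(std::make_tuple(2, 3.0, 'x'), [](auto&& entry) {
+ *   std::cout << entry << std::endl;
+ * });
+ *
+ * Dune::Hybrid::forEach(std::make_index_sequence<3>(), [](auto i) {
+ *   // i is std::integral_constant<std::size_t,0>, <1>, <2> in turn
+ * });
+ * \endcode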
+ */
+template<class Range, class F>
+constexpr void forEach(Range&& range, F&& f)
+{
+ Impl::forEach(std::forward<Range>(range), std::forward<F>(f), PriorityTag<42>());
+}
+
+
+
+/**
+ * \brief Accumulate values
+ *
+ * \ingroup HybridUtilities
+ *
+ * \tparam Range Type of given range
+ * \tparam T Type of accumulated value
+ * \tparam F Type of binary accumulation operator
+ *
+ * \param range The range of values to accumulate
+ * \param value Initial value for accumulation
+ * \param f Binary operator for accumulation
+ *
+ * This supports looping over the same ranges as Hybrid::forEach
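+ *
+ * For illustration (sketch only):
+ * \code
+ * auto t = std::make_tuple(1, 2.5f, 3.0);
+ * double sum = Dune::Hybrid::accumulate(t, 0.0, [](auto a, auto b) {
+ *   return a + b;
+ * });   // sum is 6.5
+ * \endcode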
+ */
+template<class Range, class T, class F>
+constexpr T accumulate(Range&& range, T value, F&& f)
+{
+ forEach(std::forward<Range>(range), [&](auto&& entry) {
+ value = f(value, entry);
+ });
+ return value;
+}
+
+
+
+namespace Impl {
+
+ struct Id {
+ template<class T>
+ constexpr T operator()(T&& x) const {
+ return std::forward<T>(x);
+ }
+ };
+
+ template<class IfFunc, class ElseFunc>
+ constexpr decltype(auto) ifElse(std::true_type, IfFunc&& ifFunc, ElseFunc&& /*elseFunc*/)
+ {
+ return ifFunc(Id{});
+ }
+
+ template<class IfFunc, class ElseFunc>
+ constexpr decltype(auto) ifElse(std::false_type, IfFunc&& /*ifFunc*/, ElseFunc&& elseFunc)
+ {
+ return elseFunc(Id{});
+ }
+
+ template<class IfFunc, class ElseFunc>
+ decltype(auto) ifElse(const bool& condition, IfFunc&& ifFunc, ElseFunc&& elseFunc)
+ {
+ if (condition)
+ return ifFunc(Id{});
+ else
+ return elseFunc(Id{});
+ }
+
+} // namespace Impl
+
+
+
+/**
+ * \brief A conditional expression
+ *
+ * \ingroup HybridUtilities
+ *
+ * This will call either ifFunc or elseFunc, depending
+ * on the condition. In either case the called function is
+ * passed a single argument, which is always the identity
+ * function. Wrapping an expression in a call to this identity
+ * function delays its evaluation, so if the condition is a
+ * std::integral_constant<bool,*>, each 'branch' may contain
+ * expressions that are only valid within that branch.
+ *
+ * In order to do this, the passed functors must have a single
+ * argument of type auto.
+ *
+ * Due to the lazy evaluation mechanism and support for
+ * std::integral_constant<bool,*> this makes it possible to
+ * emulate a static if statement.
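+ *
+ * A sketch of how this can emulate a static if (the function name is made up):
+ * \code
+ * template<class T>
+ * auto squaredNorm(const T& t)
+ * {
+ *   return Dune::Hybrid::ifElse(std::integral_constant<bool, std::is_arithmetic<T>::value>(),
+ *     [&](auto id) { return id(t)*id(t); },        // only instantiated for arithmetic T
+ *     [&](auto id) { return id(t).two_norm2(); }); // only instantiated for vector-like T
+ * }
+ * \endcode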
+ */
+template<class Condition, class IfFunc, class ElseFunc>
+decltype(auto) ifElse(const Condition& condition, IfFunc&& ifFunc, ElseFunc&& elseFunc)
+{
+ return Impl::ifElse(condition, std::forward<IfFunc>(ifFunc), std::forward<ElseFunc>(elseFunc));
+}
+
+/**
+ * \brief A conditional expression
+ *
+ * \ingroup HybridUtilities
+ *
+ * This provides an ifElse conditional with empty else clause.
+ */
+template<class Condition, class IfFunc>
+void ifElse(const Condition& condition, IfFunc&& ifFunc)
+{
+ ifElse(condition, std::forward<IfFunc>(ifFunc), [](auto&&) {});
+}
+
+
+
+namespace Impl {
+
+ template<class T1, class T2>
+ constexpr auto equals(const T1& /*t1*/, const T2& /*t2*/, PriorityTag<1>) -> decltype(T1::value, T2::value, std::integral_constant<bool,T1::value == T2::value>())
+ { return {}; }
+
+ template<class T1, class T2>
+ constexpr auto equals(const T1& t1, const T2& t2, PriorityTag<0>)
+ {
+ return t1==t2;
+ }
+
+} // namespace Impl
+
+
+
+/**
+ * \brief Equality comparison
+ *
+ * \ingroup HybridUtilities
+ *
+ * If both types have a static member value, the result of comparing
+ * these is returned as std::integral_constant<bool, *>. Otherwise
+ * the result of a runtime comparison of t1 and t2 is directly returned.
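+ *
+ * For illustration (sketch only):
+ * \code
+ * using namespace Dune::Indices;
+ * auto c1 = Dune::Hybrid::equals(_2, _3);   // std::integral_constant<bool,false>
+ * bool c2 = Dune::Hybrid::equals(2, 3);     // plain bool, false
+ * \endcode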
+ */
+template<class T1, class T2>
+constexpr auto equals(T1&& t1, T2&& t2)
+{
+ return Impl::equals(std::forward<T1>(t1), std::forward<T2>(t2), PriorityTag<1>());
+}
+
+
+
+namespace Impl {
+
+ template<class Result, class T, class Value, class Branches, class ElseBranch>
+ constexpr Result switchCases(std::integer_sequence<T>, const Value& /*value*/, Branches&& /*branches*/, ElseBranch&& elseBranch)
+ {
+ return elseBranch();
+ }
+
+ template<class Result, class T, T t0, T... tt, class Value, class Branches, class ElseBranch>
+ constexpr Result switchCases(std::integer_sequence<T, t0, tt...>, const Value& value, Branches&& branches, ElseBranch&& elseBranch)
+ {
+ return ifElse(
+ Hybrid::equals(std::integral_constant<T, t0>(), value),
+ [&](auto id) -> decltype(auto) {
+ return id(branches)(std::integral_constant<T, t0>());
+ }, [&](auto id) -> decltype(auto) {
+ return Impl::switchCases<Result>(id(std::integer_sequence<T, tt...>()), value, branches, elseBranch);
+ });
+ }
+
+} // namespace Impl
+
+
+
+/**
+ * \brief Switch statement
+ *
+ * \ingroup HybridUtilities
+ *
+ * \tparam Cases Type of case range
+ * \tparam Value Type of value to check against the cases
+ * \tparam Branches Type of branch function
+ * \tparam ElseBranch Type of branch function
+ *
+ * \param cases A range of cases to check for
+ * \param value The value to check against the cases
+ * \param branches A callback that will be executed with matching entry from case list
+ * \param elseBranch A callback that will be executed if no other entry matches
+ *
+ * Value is checked against all entries of the given range.
+ * If one matches, then branches is executed with the matching
+ * value as single argument. If the range is an std::integer_sequence,
+ * the value is passed as std::integral_constant.
+ * If none of the entries matches, then elseBranch is executed
+ * without any argument.
+ *
+ * Notice that this short-circuits, i.e., if one case matches,
+ * the others are no longer evaluated.
+ *
+ * The return value will be deduced from the else branch.
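+ *
+ * A sketch of dispatching a run-time index to a compile-time tuple position
+ * (the function name is made up; assumes <iostream> is included):
+ * \code
+ * void printEntry(const std::tuple<int, double, char>& t, std::size_t i)
+ * {
+ *   Dune::Hybrid::switchCases(std::make_index_sequence<3>(), i,
+ *     [&](auto ii) { std::cout << std::get<decltype(ii)::value>(t) << std::endl; },
+ *     [] () { std::cout << "index out of range" << std::endl; });
+ * }
+ * \endcode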
+ */
+template<class Cases, class Value, class Branches, class ElseBranch>
+constexpr decltype(auto) switchCases(const Cases& cases, const Value& value, Branches&& branches, ElseBranch&& elseBranch)
+{
+ return Impl::switchCases<decltype(elseBranch())>(cases, value, std::forward<Branches>(branches), std::forward<ElseBranch>(elseBranch));
+}
+
+/**
+ * \brief Switch statement
+ *
+ * \ingroup HybridUtilities
+ *
+ * \tparam Cases Type of case range
+ * \tparam Value Type of value to check against the cases
+ * \tparam Branches Type of branch function
+ *
+ * \param cases A range of cases to check for
+ * \param value The value to check against the cases
+ * \param branches A callback that will be executed with matching entry from case list
+ *
+ * Value is checked against all entries of the given range.
+ * If one matches, then branches is executed with the matching
+ * value as single argument. If the range is an std::integer_sequence,
+ * the value is passed as std::integral_constant.
+ * If none of the entries matches, then elseBranch is executed
+ * without any argument.
+ */
+template<class Cases, class Value, class Branches>
+constexpr void switchCases(const Cases& cases, const Value& value, Branches&& branches)
+{
+ Impl::switchCases<void>(cases, value, std::forward<Branches>(branches), []() {});
+}
+
+
+} // namespace Hybrid
+} // namespace Dune
+
+
+#endif // #ifndef DUNE_COMMON_HYBRIDUTILITIES_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_INDENT_HH
+#define DUNE_COMMON_INDENT_HH
+
+#include <ostream>
+#include <string>
+
+namespace Dune {
+ /** @addtogroup Common
+ *
+ * @{
+ */
+ /**
+ * @file
+ * @brief Utility class for handling nested indentation in output.
+ * @author Jö Fahlke
+ */
+ //! Utility class for handling nested indentation in output.
+ /**
+ * An indentation object has a string basic_indent and an indentation
+ * level. When it is put into a std::ostream using << it will print its
+ * basic_indent as many times as its indentation level. By default the
+ * basic_indent will be two spaces and the indentation level will be 0.
+ *
+ * An Indent object may also have a reference to a parent Indent object. If
+ * it has one, that object is put into the stream with the << operator before
+ * the indentation of this object is put into the stream. This effectively
+ * chains Indent objects together.
+ *
+ * You can use the ++ operator to raise and the -- operator to lower the
+ * indentation by one level.
+ *
+ * You can use the + operator with a numeric second argument morelevel to
+ * create a copy of the Indent object with the indentation level increased
+ * morelevel times. This is mainly useful to pass indent+1 to a function,
+ * where indent is an indentation object.
+ *
+ * You can use the + operator with a string second argument newindent to
+ * create a new Indent object with this object as parent, a basic_indent of
+ * newindent, and an indentation level of one. This is mainly useful to
+ * pass indent+"> " to a function, where "> " is a possibly different
+ * indentation string than the one used by the indent object.
+ *
+ * \note The idea is for functions to receive indentation objects as
+ * call-by-value parameters. This way, the indentation object of the
+ * caller will not be modified by the function, and the function can
+ * simply return at any time without having to clean up.
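+ *
+ * A short usage sketch (illustrative only; assumes <iostream> is included):
+ * \code
+ * Dune::Indent indent;                       // level 0, two spaces per level
+ * std::cout << indent << "top" << std::endl;
+ * std::cout << indent+1 << "nested one level deeper" << std::endl;
+ * std::cout << indent+"> " << "nested with a different indentation string" << std::endl;
+ * \endcode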
+ */
+ class Indent
+ {
+ const Indent* parent;
+ std::string basic_indent;
+ unsigned level;
+
+ public:
+ //! setup without parent
+ /**
+ * \note Initial indentation level is 0 by default for this constructor.
+ */
+ inline Indent(const std::string& basic_indent_ = " ", unsigned level_ = 0)
+ : parent(0), basic_indent(basic_indent_), level(level_)
+ { }
+
+ //! setup without parent and basic_indentation of two spaces
+ inline Indent(unsigned level_)
+ : parent(0), basic_indent(" "), level(level_)
+ { }
+
+ //! setup with parent
+ /**
+ * \note Initial indentation level is 1 by default for this constructor.
+ */
+ inline Indent(const Indent* parent_,
+ const std::string& basic_indent_ = " ", unsigned level_ = 1)
+ : parent(parent_), basic_indent(basic_indent_), level(level_)
+ { }
+
+ //! setup with parent
+ inline Indent(const Indent* parent_, unsigned level_)
+ : parent(parent_), basic_indent(" "), level(level_)
+ { }
+
+ //! create new indentation object with this one as parent
+ inline Indent operator+(const std::string& newindent) const {
+ return Indent(this, newindent);
+ }
+ //! create a copy of this indentation object with raised level
+ inline Indent operator+(unsigned morelevel) const {
+ return Indent(parent, basic_indent, level+morelevel);
+ }
+ //! raise indentation level
+ inline Indent& operator++() { ++level; return *this; }
+ //! lower indentation level
+ inline Indent& operator--() { if ( level > 0 ) --level; return *this; }
+
+ //! write indentation to a stream
+ friend inline std::ostream& operator<<(std::ostream& s,
+ const Indent& indent);
+ };
+
+ //! write indentation to a stream
+ inline std::ostream& operator<<(std::ostream& s, const Indent& indent) {
+ if(indent.parent)
+ s << *indent.parent;
+ for(unsigned i = 0; i < indent.level; ++i)
+ s << indent.basic_indent;
+ return s;
+ }
+
+ /** @} group Common */
+
+} // namespace Dune
+
+#endif // DUNE_COMMON_INDENT_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_COMMON_INDICES_HH
+#define DUNE_COMMON_INDICES_HH
+
+#include <cstddef>
+#include <type_traits>
+#include <utility>
+
+#include <dune/common/keywords.hh>
+
+namespace Dune
+{
+ /** \addtogroup Common
+ * \{
+ */
+
+ /** \brief An index constant with value i
+ *
+ * An index constant is a simple type alias for an integral_constant.
+ * Its main advantages are clarity (it is easier to see that code uses it
+ * as an index) and the fact that the integral type is fixed, reducing verbosity
+ * and avoiding the problem of maybe trying to overload / specialize using a different
+ * integral type.
+ */
+ template<std::size_t i>
+ using index_constant = std::integral_constant<std::size_t, i>;
+
+
+
+ /** \brief Namespace with predefined compile time indices for the range [0,19]
+ *
+ * The predefined index objects in this namespace are `constexpr`, which allows them to
+ * be used in situations where a compile time constant is needed, e.g. for a template
+ * parameter. Apart from that, `constexpr` implies internal linkage, which helps to avoid
+ * ODR problems.
+ *
+ * The constants implicitly convert to their contained value, so you can for example write
+ *
+ * \code{.cc}
+ * std::array<int,_10> a;
+ * // the above line is equivalent to
+ * std::array<int,10> b;
+ * \endcode
+ *
+ */
+ namespace Indices
+ {
+ //! Compile time index with value 0.
+ DUNE_INLINE_VARIABLE constexpr index_constant< 0> _0 = {};
+
+ //! Compile time index with value 1.
+ DUNE_INLINE_VARIABLE constexpr index_constant< 1> _1 = {};
+
+ //! Compile time index with value 2.
+ DUNE_INLINE_VARIABLE constexpr index_constant< 2> _2 = {};
+
+ //! Compile time index with value 3.
+ DUNE_INLINE_VARIABLE constexpr index_constant< 3> _3 = {};
+
+ //! Compile time index with value 4.
+ DUNE_INLINE_VARIABLE constexpr index_constant< 4> _4 = {};
+
+ //! Compile time index with value 5.
+ DUNE_INLINE_VARIABLE constexpr index_constant< 5> _5 = {};
+
+ //! Compile time index with value 6.
+ DUNE_INLINE_VARIABLE constexpr index_constant< 6> _6 = {};
+
+ //! Compile time index with value 7.
+ DUNE_INLINE_VARIABLE constexpr index_constant< 7> _7 = {};
+
+ //! Compile time index with value 8.
+ DUNE_INLINE_VARIABLE constexpr index_constant< 8> _8 = {};
+
+ //! Compile time index with value 9.
+ DUNE_INLINE_VARIABLE constexpr index_constant< 9> _9 = {};
+
+ //! Compile time index with value 10.
+ DUNE_INLINE_VARIABLE constexpr index_constant<10> _10 = {};
+
+ //! Compile time index with value 11.
+ DUNE_INLINE_VARIABLE constexpr index_constant<11> _11 = {};
+
+ //! Compile time index with value 12.
+ DUNE_INLINE_VARIABLE constexpr index_constant<12> _12 = {};
+
+ //! Compile time index with value 13.
+ DUNE_INLINE_VARIABLE constexpr index_constant<13> _13 = {};
+
+ //! Compile time index with value 14.
+ DUNE_INLINE_VARIABLE constexpr index_constant<14> _14 = {};
+
+ //! Compile time index with value 15.
+ DUNE_INLINE_VARIABLE constexpr index_constant<15> _15 = {};
+
+ //! Compile time index with value 16.
+ DUNE_INLINE_VARIABLE constexpr index_constant<16> _16 = {};
+
+ //! Compile time index with value 17.
+ DUNE_INLINE_VARIABLE constexpr index_constant<17> _17 = {};
+
+ //! Compile time index with value 18.
+ DUNE_INLINE_VARIABLE constexpr index_constant<18> _18 = {};
+
+ //! Compile time index with value 19.
+ DUNE_INLINE_VARIABLE constexpr index_constant<19> _19 = {};
+
+ } // namespace Indices
+
+ /**
+ * \brief Unpack an std::integer_sequence<I,i...> to std::integral_constant<I,i>...
+ *
+   * This forwards all entries of the given std::integer_sequence
+ * as individual std::integral_constant arguments to the given callback.
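+   *
+   * A brief usage sketch (the lambda and the sequence length are only
+   * illustrative):
+   * \code
+   * auto sum = Dune::unpackIntegerSequence(
+   *   [](auto i0, auto i1, auto i2) { return i0 + i1 + i2; },
+   *   std::make_index_sequence<3>{});   // sum == 0 + 1 + 2
+   * \endcode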
+ *
+ * \param f Callback which has to accept unpacked values
+ * \param sequence Packed std::integer_sequence of values
+ * \returns Result of calling f with unpacked integers.
+ */
+ template<class F, class I, I... i>
+ decltype(auto) unpackIntegerSequence(F&& f, std::integer_sequence<I, i...> sequence)
+ {
+ return f(std::integral_constant<I, i>()...);
+ }
+
+} //namespace Dune
+
+#endif // DUNE_COMMON_INDICES_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_INTERFACES_HH
+#define DUNE_INTERFACES_HH
+
+/** @file
+ @author Robert Kloefkorn
+ @brief Provides interfaces for detection of specific behavior
+ */
+
+namespace Dune {
+
+ //! An interface class for cloneable objects
+ struct Cloneable {
+
+ /** \brief Clones the object
+ * clone needs to be redefined by an implementation class, with the
+ * return type covariantly adapted. Remember to
+ * delete the resulting pointer.
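+     *
+     * A minimal sketch of an implementation class (the name MyShape is
+     * only illustrative):
+     * \code
+     * struct MyShape : Dune::Cloneable {
+     *   MyShape* clone() const override { return new MyShape(*this); }
+     * };
+     * \endcode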
+ */
+ virtual Cloneable* clone() const = 0;
+
+ /** \brief Destructor */
+ virtual ~Cloneable()
+ {}
+
+ };
+
+} // end namespace Dune
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <dune/common/ios_state.hh>
+
+namespace Dune {
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // class ios_base_all_saver
+ //
+
+ ios_base_all_saver::ios_base_all_saver(state_type& ios_)
+ : ios(ios_), oldflags(ios.flags()), oldprec(ios.precision()),
+ oldwidth(ios.width())
+ {}
+
+ ios_base_all_saver::~ios_base_all_saver()
+ {
+ restore();
+ }
+
+ void ios_base_all_saver::restore()
+ {
+ ios.flags(oldflags);
+ ios.precision(oldprec);
+ ios.width(oldwidth);
+ }
+
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_IOS_STATE_HH
+#define DUNE_COMMON_IOS_STATE_HH
+
+#include <ios>
+
+namespace Dune {
+ /** @addtogroup Common
+ *
+ * @{
+ */
+ /**
+ * @file
+ * @brief Utility class for storing and resetting stream attributes.
+ * @author Markus Blatt
+ */
+ /**
+ * @brief Utility class for storing and resetting stream attributes.
+ *
+ * The constructor saves the attributes currently set in the ios_base
+ * object and the destructor restores these attributes again. The
+   * attributes can also be restored at any time by calling the method
+ * restore().
+ *
+ * The saved attributes are the format flags, precision, and width.
+ *
+   * @note The interface of this class is meant to be drop-in compatible with
+   *       the class of the same name from <boost/io/ios_state.hpp>.
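+   *
+   * A typical use looks like this (a sketch; any standard stream works):
+   * \code
+   * {
+   *   Dune::ios_base_all_saver saver(std::cout);
+   *   std::cout << std::hex << 255;   // temporarily print in hexadecimal
+   * } // leaving the scope restores flags, precision and width of std::cout
+   * \endcode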
+ */
+ class ios_base_all_saver
+ {
+ public:
+ /** @brief Export type of object we save the state for */
+ typedef std::ios_base state_type;
+
+ /**
+ * @brief Constructor that stores the currently used flags.
+ * @param ios_ The ios_base object whose flags are to be saved and
+ * restored. Any stream object should work here.
+ *
+     * @note A reference to the ios_base object is stored in this object. Thus
+ * the ios_base object must remain valid until the destructor of
+ * this object has been called.
+ */
+ ios_base_all_saver(state_type& ios_);
+
+ /**
+ * @brief Destructor that restores the flags stored by the constructor.
+ */
+ ~ios_base_all_saver();
+
+ /**
+ * @brief Restore flags now
+ *
+ * The flags will also be restored at destruction time even if this method
+ * was used.
+ */
+ void restore();
+
+ private:
+ /** @brief the ios object to restore the flags to. */
+ state_type& ios;
+ /** @brief The flags used when the constructor was called. */
+ state_type::fmtflags oldflags;
+ /** @brief The precision in use when the constructor was called. */
+ std::streamsize oldprec;
+ /** @brief The width in use when the constructor was called. */
+ std::streamsize oldwidth;
+ };
+
+  /** @} */
+}
+
+#endif // DUNE_COMMON_IOS_STATE_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_ITERATORFACADES_HH
+#define DUNE_ITERATORFACADES_HH
+
+#include <iterator>
+#include <type_traits>
+
+#include "typetraits.hh"
+
+namespace Dune
+{
+ /*! \defgroup IteratorFacades Iterator facades
+ \ingroup Common
+
+ \brief Iterator facades for writing stl conformant iterators.
+
+     Using these facades, writing iterators for arbitrary containers becomes much less
+     cumbersome, as only a few functions have to be implemented. All other functions needed by
+     the STL are provided by the facades using the Barton-Nackman trick (also known as the
+     curiously recurring template pattern).
+
+ The following example illustrates how a random access iterator might be written:
+
+ \code
+ #include<dune/common/iteratorfacades.hh>
+
+ ...
+
+ template<class C, class T>
+ class TestIterator : public Dune::BidirectionalIteratorFacade<TestIterator<C,T>,T, T&, int>
+ {
+ friend class TestIterator<typename std::remove_const<C>::type, typename std::remove_const<T>::type >;
+ friend class TestIterator<const typename std::remove_const<C>::type, const typename std::remove_const<T>::type >;
+
+ public:
+
+ // Constructors needed by the facade iterators.
+ TestIterator(): container_(0), position_(0)
+ { }
+
+ TestIterator(C& cont, int pos)
+ : container_(&cont), position_(pos)
+ {}
+
+ TestIterator(const TestIterator<typename std::remove_const<C>::type, typename std::remove_const<T>::type >& other)
+ : container_(other.container_), position_(other.position_)
+ {}
+
+
+ TestIterator(const TestIterator<const typename std::remove_const<C>::type, const typename std::remove_const<T>::type >& other)
+ : container_(other.container_), position_(other.position_)
+ {}
+
+ // Methods needed by the forward iterator
+ bool equals(const TestIterator<typename std::remove_const<C>::type,typename std::remove_const<T>::type>& other) const
+ {
+ return position_ == other.position_ && container_ == other.container_;
+ }
+
+
+ bool equals(const TestIterator<const typename std::remove_const<C>::type,const typename std::remove_const<T>::type>& other) const
+ {
+ return position_ == other.position_ && container_ == other.container_;
+ }
+
+ T& dereference() const
+ {
+ return container_->values_[position_];
+ }
+
+ void increment()
+ {
+ ++position_;
+ }
+
+ // Additional function needed by BidirectionalIterator
+ void decrement()
+ {
+ --position_;
+ }
+
+ // Additional function needed by RandomAccessIterator
+ T& elementAt(int i)const
+ {
+ return container_->operator[](position_+i);
+ }
+
+ void advance(int n)
+ {
+ position_=position_+n;
+ }
+
+ std::ptrdiff_t distanceTo(TestIterator<const typename std::remove_const<C>::type,const typename std::remove_const<T>::type> other) const
+ {
+ assert(other.container_==container_);
+ return other.position_ - position_;
+ }
+
+ std::ptrdiff_t distanceTo(TestIterator<const typename std::remove_const<C>::type, typename std::remove_const<T>::type> other) const
+ {
+ assert(other.container_==container_);
+ return other.position_ - position_;
+ }
+ private:
+ C *container_;
+ size_t position_;
+ };
+
+ \endcode
+ See dune/common/test/iteratorbase.hh for details.
+ */
+
+
+ /**
+ * @file
+ * @brief This file implements iterator facade classes for writing stl conformant iterators.
+ *
+   * Using these facades, writing iterators for arbitrary containers becomes much less
+   * cumbersome, as only a few functions have to be implemented. All other functions needed by
+   * the STL are provided by the facades using the Barton-Nackman trick (also known as the
+   * curiously recurring template pattern).
+ */
+
+ /** @addtogroup IteratorFacades
+ *
+ * @{
+ */
+ /**
+ * @brief Base class for stl conformant forward iterators.
+ *
+ * \tparam T The derived class
+ * \tparam V The value type
+ * \tparam R The reference type
+ * \tparam D The type for differences between two iterators
+ */
+ template<class T, class V, class R = V&, class D = std::ptrdiff_t>
+ class ForwardIteratorFacade
+ {
+
+ public:
+ /* type aliases required by C++ for iterators */
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = typename std::remove_const<V>::type;
+ using difference_type = D;
+ using pointer = V*;
+ using reference = R;
+
+ /**
+ * @brief The type of derived iterator.
+ *
+     * The derived iterator class has to provide the
+     * following functions:
+ *
+ * \code
+ *
+ * // Access the value referred to.
+ * Reference dereference() const;
+ *
+ * // Compare for equality with iterator j
+ * bool equals(j);
+ *
+ * // position the iterator at the next element.
+ * void increment()
+ *
+ * // check for equality with other iterator
+ * bool equals(other)
+ * \endcode
+ *
+ * For an elaborate explanation see the
+ * <A HREF="http://www.sgi.com/tech/stl/iterator_traits.html">STL Documentation</A>!
+ */
+ typedef T DerivedType;
+
+ /**
+ * @brief The type of value accessed through the iterator.
+ */
+ typedef V Value;
+
+ /**
+ * @brief The pointer to the Value.
+ */
+ typedef V* Pointer;
+
+ /**
+ * @brief The type of the difference between two positions.
+ */
+ typedef D DifferenceType;
+
+ /**
+ * @brief The type of the reference to the values accessed.
+ */
+ typedef R Reference;
+
+ /** @brief Dereferencing operator. */
+ Reference operator*() const
+ {
+ return static_cast<DerivedType const*>(this)->dereference();
+ }
+
+ Pointer operator->() const
+ {
+ return &(static_cast<const DerivedType *>(this)->dereference());
+ }
+
+ /** @brief Preincrement operator. */
+ DerivedType& operator++()
+ {
+ static_cast<DerivedType *>(this)->increment();
+ return *static_cast<DerivedType *>(this);
+ }
+
+ /** @brief Postincrement operator. */
+ DerivedType operator++(int)
+ {
+ DerivedType tmp(static_cast<DerivedType const&>(*this));
+ this->operator++();
+ return tmp;
+ }
+ };
+
+ /**
+ * @brief Checks for equality.
+ *
+ * This operation is only defined if either D2
+ * is convertible to D1 or vice versa. If that is
+ * not the case the compiler will report an error
+ * as EnableIfInterOperable<D1,D2,bool>::type is
+ * not defined.
+ *
+ */
+ template<class T1, class V1, class R1, class D,
+ class T2, class V2, class R2>
+ inline typename EnableIfInterOperable<T1,T2,bool>::type
+ operator==(const ForwardIteratorFacade<T1,V1,R1,D>& lhs,
+ const ForwardIteratorFacade<T2,V2,R2,D>& rhs)
+ {
+ if(std::is_convertible<T2,T1>::value)
+ return static_cast<const T1&>(lhs).equals(static_cast<const T2&>(rhs));
+ else
+ return static_cast<const T2&>(rhs).equals(static_cast<const T1&>(lhs));
+ }
+
+ /**
+ * @brief Checks for inequality.
+ *
+ * This operation is only defined if either D2
+ * is convertible to D1 or vice versa. If that is
+ * not the case the compiler will report an error
+ * as EnableIfInterOperable<D1,D2,bool>::type is
+ * not defined.
+ *
+ */
+ template<class T1, class V1, class R1, class D,
+ class T2, class V2, class R2>
+ inline typename EnableIfInterOperable<T1,T2,bool>::type
+ operator!=(const ForwardIteratorFacade<T1,V1,R1,D>& lhs,
+ const ForwardIteratorFacade<T2,V2,R2,D>& rhs)
+ {
+ if(std::is_convertible<T2,T1>::value)
+ return !static_cast<const T1&>(lhs).equals(static_cast<const T2&>(rhs));
+ else
+ return !static_cast<const T2&>(rhs).equals(static_cast<const T1&>(lhs));
+ }
+
+ /**
+ * @brief Facade class for stl conformant bidirectional iterators.
+ *
+ */
+ template<class T, class V, class R = V&, class D = std::ptrdiff_t>
+ class BidirectionalIteratorFacade
+ {
+
+ public:
+ /* type aliases required by C++ for iterators */
+ using iterator_category = std::bidirectional_iterator_tag;
+ using value_type = typename std::remove_const<V>::type;
+ using difference_type = D;
+ using pointer = V*;
+ using reference = R;
+
+ /**
+ * @brief The type of derived iterator.
+ *
+     * The derived iterator class has to provide the
+     * following functions:
+ *
+ * \code
+ *
+ * // Access the value referred to.
+ * Reference dereference() const;
+ *
+ * // Compare for equality with j
+ * bool equals(j);
+ *
+ * // position the iterator at the next element.
+ * void increment()
+ *
+ * // position the iterator at the previous element.
+ * void decrement()
+ *
+ * \endcode
+ *
+ * For an elaborate explanation see the
+ * <A HREF="http://www.sgi.com/tech/stl/iterator_traits.html">STL Documentation</A>
+ */
+ typedef T DerivedType;
+
+ /**
+ * @brief The type of value accessed through the iterator.
+ */
+ typedef V Value;
+
+ /**
+ * @brief The pointer to the Value.
+ */
+ typedef V* Pointer;
+
+ /**
+ * @brief The type of the difference between two positions.
+ */
+ typedef D DifferenceType;
+
+ /**
+ * @brief The type of the reference to the values accessed.
+ */
+ typedef R Reference;
+
+ /** @brief Dereferencing operator. */
+ Reference operator*() const
+ {
+ return static_cast<DerivedType const*>(this)->dereference();
+ }
+
+ Pointer operator->() const
+ {
+ return &(static_cast<const DerivedType *>(this)->dereference());
+ }
+
+ /** @brief Preincrement operator. */
+ DerivedType& operator++()
+ {
+ static_cast<DerivedType *>(this)->increment();
+ return *static_cast<DerivedType *>(this);
+ }
+
+ /** @brief Postincrement operator. */
+ DerivedType operator++(int)
+ {
+ DerivedType tmp(static_cast<DerivedType const&>(*this));
+ this->operator++();
+ return tmp;
+ }
+
+
+    /** @brief Predecrement operator. */
+ DerivedType& operator--()
+ {
+ static_cast<DerivedType *>(this)->decrement();
+ return *static_cast<DerivedType *>(this);
+ }
+
+    /** @brief Postdecrement operator. */
+ DerivedType operator--(int)
+ {
+ DerivedType tmp(static_cast<DerivedType const&>(*this));
+ this->operator--();
+ return tmp;
+ }
+ };
+
+ /**
+ * @brief Checks for equality.
+ *
+ * This operation is only defined if T2 is convertible to T1, otherwise it
+ * is removed from the overload set since the enable_if for the return type
+   * yields an invalid type expression.
+ */
+ template<class T1, class V1, class R1, class D,
+ class T2, class V2, class R2>
+ inline typename std::enable_if<std::is_convertible<T2,T1>::value,bool>::type
+ operator==(const BidirectionalIteratorFacade<T1,V1,R1,D>& lhs,
+ const BidirectionalIteratorFacade<T2,V2,R2,D>& rhs)
+ {
+ return static_cast<const T1&>(lhs).equals(static_cast<const T2&>(rhs));
+ }
+
+ /**
+ * @brief Checks for equality.
+ *
+   * This operation is only defined if T1 is convertible to T2 and T2
+   * is not convertible to T1. Otherwise the operator is removed from the
+   * overload set since the enable_if for the return type yields an invalid
+   * type expression.
+ */
+ template<class T1, class V1, class R1, class D,
+ class T2, class V2, class R2>
+ inline
+ typename std::enable_if<std::is_convertible<T1,T2>::value && !std::is_convertible<T2,T1>::value,
+ bool>::type
+ operator==(const BidirectionalIteratorFacade<T1,V1,R1,D>& lhs,
+ const BidirectionalIteratorFacade<T2,V2,R2,D>& rhs)
+ {
+ return static_cast<const T2&>(rhs).equals(static_cast<const T1&>(lhs));
+ }
+
+ /**
+ * @brief Checks for inequality.
+ *
+ * This operation is only defined if either D2
+ * is convertible to D1 or vice versa. If that is
+ * not the case the compiler will report an error
+ * as EnableIfInterOperable<D1,D2,bool>::type is
+ * not defined.
+ *
+ */
+ template<class T1, class V1, class R1, class D,
+ class T2, class V2, class R2>
+ inline typename EnableIfInterOperable<T1,T2,bool>::type
+ operator!=(const BidirectionalIteratorFacade<T1,V1,R1,D>& lhs,
+ const BidirectionalIteratorFacade<T2,V2,R2,D>& rhs)
+ {
+ return !(lhs == rhs);
+ }
+
+ /**
+   * @brief Base class for stl conformant random access iterators.
+ *
+ */
+ template<class T, class V, class R = V&, class D = std::ptrdiff_t>
+ class RandomAccessIteratorFacade
+ {
+
+ public:
+ /* type aliases required by C++ for iterators */
+ using iterator_category = std::random_access_iterator_tag;
+ using value_type = typename std::remove_const<V>::type;
+ using difference_type = D;
+ using pointer = V*;
+ using reference = R;
+
+ /**
+ * @brief The type of derived iterator.
+ *
+     * The derived iterator class has to provide the
+     * following functions:
+ *
+ * \code
+ *
+ * // Access the value referred to.
+ * Reference dereference() const;
+ * // Access the value at some other location
+ * Reference elementAt(n) const;
+ *
+ * // Compare for equality with j
+ * bool equals(j);
+ *
+ * // position the iterator at the next element.
+ * void increment()
+ *
+ * // position the iterator at the previous element.
+ * void decrement()
+ *
+     * // advance the iterator by a number of positions.
+ * void advance(DifferenceType n);
+ * // calculate the distance to another iterator.
+ * // One should incorporate an assertion whether
+ * // the same containers are referenced
+ * DifferenceType distanceTo(j) const;
+ * \endcode
+ *
+ * For an elaborate explanation see the
+ * <A HREF="http://www.sgi.com/tech/stl/iterator_traits.html">STL Documentation</A>
+ */
+ typedef T DerivedType;
+
+ /**
+ * @brief The type of value accessed through the iterator.
+ */
+ typedef V Value;
+
+ /**
+ * @brief The pointer to the Value.
+ */
+ typedef V* Pointer;
+
+ /**
+ * @brief The type of the difference between two positions.
+ */
+ typedef D DifferenceType;
+
+ /**
+ * @brief The type of the reference to the values accessed.
+ */
+ typedef R Reference;
+
+ /** @brief Dereferencing operator. */
+ Reference operator*() const
+ {
+ return static_cast<DerivedType const*>(this)->dereference();
+ }
+
+ Pointer operator->() const
+ {
+ return &(static_cast<const DerivedType *>(this)->dereference());
+ }
+
+ /**
+ * @brief Get the element n positions from the current one.
+ * @param n The distance to the element.
+ * @return The element at that distance.
+ */
+ Reference operator[](DifferenceType n) const
+ {
+ return static_cast<const DerivedType *>(this)->elementAt(n);
+ }
+
+ /** @brief Preincrement operator. */
+ DerivedType& operator++()
+ {
+ static_cast<DerivedType *>(this)->increment();
+ return *static_cast<DerivedType *>(this);
+ }
+
+ /** @brief Postincrement operator. */
+ DerivedType operator++(int)
+ {
+ DerivedType tmp(static_cast<DerivedType const&>(*this));
+ this->operator++();
+ return tmp;
+ }
+
+ DerivedType& operator+=(DifferenceType n)
+ {
+ static_cast<DerivedType *>(this)->advance(n);
+ return *static_cast<DerivedType *>(this);
+ }
+
+ DerivedType operator+(DifferenceType n) const
+ {
+ DerivedType tmp(static_cast<DerivedType const&>(*this));
+ tmp.advance(n);
+ return tmp;
+ }
+
+
+ /** @brief Predecrement operator. */
+ DerivedType& operator--()
+ {
+ static_cast<DerivedType *>(this)->decrement();
+ return *static_cast<DerivedType *>(this);
+ }
+
+ /** @brief Postdecrement operator. */
+ DerivedType operator--(int)
+ {
+ DerivedType tmp(static_cast<DerivedType const&>(*this));
+ this->operator--();
+ return tmp;
+ }
+
+ DerivedType& operator-=(DifferenceType n)
+ {
+ static_cast<DerivedType *>(this)->advance(-n);
+ return *static_cast<DerivedType *>(this);
+ }
+
+ DerivedType operator-(DifferenceType n) const
+ {
+ DerivedType tmp(static_cast<DerivedType const&>(*this));
+ tmp.advance(-n);
+ return tmp;
+ }
+
+
+ };
+
+ /**
+ * @brief Checks for equality.
+ *
+ * This operation is only defined if either D2
+ * is convertible to D1 or vice versa. If that is
+ * not the case the compiler will report an error
+ * as EnableIfInterOperable<D1,D2,bool>::type is
+ * not defined.
+ *
+ */
+ template<class T1, class V1, class R1, class D,
+ class T2, class V2, class R2>
+ inline typename EnableIfInterOperable<T1,T2,bool>::type
+ operator==(const RandomAccessIteratorFacade<T1,V1,R1,D>& lhs,
+ const RandomAccessIteratorFacade<T2,V2,R2,D>& rhs)
+ {
+ if(std::is_convertible<T2,T1>::value)
+ return static_cast<const T1&>(lhs).equals(static_cast<const T2&>(rhs));
+ else
+ return static_cast<const T2&>(rhs).equals(static_cast<const T1&>(lhs));
+ }
+
+ /**
+ * @brief Checks for inequality.
+ *
+ * This operation is only defined if either D2
+ * is convertible to D1 or vice versa. If that is
+ * not the case the compiler will report an error
+ * as EnableIfInterOperable<D1,D2,bool>::type is
+ * not defined.
+ *
+ */
+ template<class T1, class V1, class R1, class D,
+ class T2, class V2, class R2>
+ inline typename EnableIfInterOperable<T1,T2,bool>::type
+ operator!=(const RandomAccessIteratorFacade<T1,V1,R1,D>& lhs,
+ const RandomAccessIteratorFacade<T2,V2,R2,D>& rhs)
+ {
+ if(std::is_convertible<T2,T1>::value)
+ return !static_cast<const T1&>(lhs).equals(static_cast<const T2&>(rhs));
+ else
+ return !static_cast<const T2&>(rhs).equals(static_cast<const T1&>(lhs));
+ }
+
+ /**
+ * @brief Comparison operator.
+ *
+ * This operation is only defined if either D2
+ * is convertible to D1 or vice versa. If that is
+ * not the case the compiler will report an error
+ * as EnableIfInterOperable<D1,D2,bool>::type is
+ * not defined.
+ *
+ */
+ template<class T1, class V1, class R1, class D,
+ class T2, class V2, class R2>
+ inline typename EnableIfInterOperable<T1,T2,bool>::type
+ operator<(const RandomAccessIteratorFacade<T1,V1,R1,D>& lhs,
+ const RandomAccessIteratorFacade<T2,V2,R2,D>& rhs)
+ {
+ if(std::is_convertible<T2,T1>::value)
+ return static_cast<const T1&>(lhs).distanceTo(static_cast<const T2&>(rhs))>0;
+ else
+ return static_cast<const T2&>(rhs).distanceTo(static_cast<const T1&>(lhs))<0;
+ }
+
+
+ /**
+ * @brief Comparison operator.
+ *
+ * This operation is only defined if either D2
+ * is convertible to D1 or vice versa. If that is
+ * not the case the compiler will report an error
+ * as EnableIfInterOperable<D1,D2,bool>::type is
+ * not defined.
+ *
+ */
+ template<class T1, class V1, class R1, class D,
+ class T2, class V2, class R2>
+ inline typename EnableIfInterOperable<T1,T2,bool>::type
+ operator<=(const RandomAccessIteratorFacade<T1,V1,R1,D>& lhs,
+ const RandomAccessIteratorFacade<T2,V2,R2,D>& rhs)
+ {
+ if(std::is_convertible<T2,T1>::value)
+ return static_cast<const T1&>(lhs).distanceTo(static_cast<const T2&>(rhs))>=0;
+ else
+ return static_cast<const T2&>(rhs).distanceTo(static_cast<const T1&>(lhs))<=0;
+ }
+
+
+ /**
+ * @brief Comparison operator.
+ *
+ * This operation is only defined if either D2
+ * is convertible to D1 or vice versa. If that is
+ * not the case the compiler will report an error
+ * as EnableIfInterOperable<D1,D2,bool>::type is
+ * not defined.
+ *
+ */
+ template<class T1, class V1, class R1, class D,
+ class T2, class V2, class R2>
+ inline typename EnableIfInterOperable<T1,T2,bool>::type
+ operator>(const RandomAccessIteratorFacade<T1,V1,R1,D>& lhs,
+ const RandomAccessIteratorFacade<T2,V2,R2,D>& rhs)
+ {
+ if(std::is_convertible<T2,T1>::value)
+ return static_cast<const T1&>(lhs).distanceTo(static_cast<const T2&>(rhs))<0;
+ else
+ return static_cast<const T2&>(rhs).distanceTo(static_cast<const T1&>(lhs))>0;
+ }
+
+ /**
+ * @brief Comparison operator.
+ *
+ * This operation is only defined if either D2
+ * is convertible to D1 or vice versa. If that is
+ * not the case the compiler will report an error
+ * as EnableIfInterOperable<D1,D2,bool>::type is
+ * not defined.
+ *
+ */
+ template<class T1, class V1, class R1, class D,
+ class T2, class V2, class R2>
+ inline typename EnableIfInterOperable<T1,T2,bool>::type
+ operator>=(const RandomAccessIteratorFacade<T1,V1,R1,D>& lhs,
+ const RandomAccessIteratorFacade<T2,V2,R2,D>& rhs)
+ {
+ if(std::is_convertible<T2,T1>::value)
+ return static_cast<const T1&>(lhs).distanceTo(static_cast<const T2&>(rhs))<=0;
+ else
+ return static_cast<const T2&>(rhs).distanceTo(static_cast<const T1&>(lhs))>=0;
+ }
+
+ /**
+   * @brief Calculates the difference between two iterators.
+ *
+ * This operation is only defined if either D2
+ * is convertible to D1 or vice versa. If that is
+ * not the case the compiler will report an error
+ * as EnableIfInterOperable<D1,D2,bool>::type is
+ * not defined.
+ *
+ */
+ template<class T1, class V1, class R1, class D,
+ class T2, class V2, class R2>
+ inline typename EnableIfInterOperable<T1,T2,D>::type
+ operator-(const RandomAccessIteratorFacade<T1,V1,R1,D>& lhs,
+ const RandomAccessIteratorFacade<T2,V2,R2,D>& rhs)
+ {
+ if(std::is_convertible<T2,T1>::value)
+ return -static_cast<const T1&>(lhs).distanceTo(static_cast<const T2&>(rhs));
+ else
+ return static_cast<const T2&>(rhs).distanceTo(static_cast<const T1&>(lhs));
+ }
+
+ /** @} */
+}
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_ITERATORRANGE_HH
+#define DUNE_COMMON_ITERATORRANGE_HH
+
+namespace Dune {
+
+ //! Simple range between a begin and an end iterator.
+ /**
+ * IteratorRange is mainly useful as a lightweight adaptor
+ * class when adding support for range-based for loops to
+ * existing containers that lack a standard begin(), end()
+ * pair of member functions.
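+   *
+   * A short usage sketch (the container is only illustrative):
+   * \code
+   * std::vector<int> v{1, 2, 3, 4};
+   * Dune::IteratorRange<std::vector<int>::iterator> range(v.begin()+1, v.end());
+   * for (int x : range)
+   *   std::cout << x << " ";   // prints "2 3 4 "
+   * \endcode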
+ *
+ * \tparam Iterator The type of iterator
+ * \ingroup CxxUtilities
+ */
+ template<typename Iterator>
+ class IteratorRange
+ {
+
+ public:
+
+ //! The iterator belonging to this range.
+ typedef Iterator iterator;
+
+ //! The iterator belonging to this range.
+ /**
+ * This typedef is here mainly for compatibility reasons.
+ */
+ typedef Iterator const_iterator;
+
+ //! Constructs an iterator range on [begin,end).
+ IteratorRange(const Iterator& begin, const Iterator& end)
+ : _begin(begin)
+ , _end(end)
+ {}
+
+ //! Default constructor, relies on iterators being default-constructible.
+ IteratorRange()
+ {}
+
+ //! Returns an iterator pointing to the begin of the range.
+ iterator begin() const
+ {
+ return _begin;
+ }
+
+ //! Returns an iterator pointing past the end of the range.
+ iterator end() const
+ {
+ return _end;
+ }
+
+ private:
+
+ Iterator _begin;
+ Iterator _end;
+
+ };
+
+}
+
+#endif // DUNE_COMMON_ITERATORRANGE_HH
--- /dev/null
+#ifndef DUNE_COMMON_KEYWORDS_HH
+#define DUNE_COMMON_KEYWORDS_HH
+
+/** \file
+ * \brief Definitions of several macros that conditionally make C++ syntax
+ * available.
+ *
+ * This header contains several macros that enable C++ features depending on your
+ * compiler. Most of these features are optional and provide additional functionality
+ * like making code constexpr.
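+ *
+ * A short sketch of how these macros might be used (the names are only
+ * illustrative):
+ * \code
+ * // a header-defined constant, marked inline where the compiler supports it
+ * DUNE_INLINE_VARIABLE constexpr int answer = 42;
+ *
+ * // function body using relaxed (C++14) constexpr rules where available
+ * DUNE_GENERALIZED_CONSTEXPR int twice(int x) { int y = x; y += x; return y; }
+ * \endcode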
+ *
+ * \ingroup CxxUtilities
+ */
+
+
+#if __cpp_inline_variables >= 201606
+#define DUNE_INLINE_VARIABLE inline
+#else
+//! Preprocessor macro used for marking variables inline on supported compilers.
+/**
+ * \ingroup CxxUtilities
+ */
+#define DUNE_INLINE_VARIABLE
+#endif
+
+
+#if __cpp_constexpr >= 201304
+#define DUNE_GENERALIZED_CONSTEXPR constexpr
+#else
+//! Preprocessor macro used for marking code as constexpr under the relaxed rules of C++14 if supported by the compiler.
+/**
+ * \ingroup CxxUtilities
+ */
+#define DUNE_GENERALIZED_CONSTEXPR
+#endif
+
+
+#endif // DUNE_COMMON_KEYWORDS_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_LCM_HH
+#define DUNE_LCM_HH
+
+#warning "This header is deprecated and will be removed after release 2.8. Use std::lcm instead."
+
+/** \file
+ * \brief Statically compute the least common multiple of two integers
+ */
+
+#include <numeric>
+
+namespace Dune
+{
+
+ /**
+ * @addtogroup Common
+ * @{
+ */
+ /**
+ * @file
+   * This file provides template constructs for calculating the
+   * least common multiple.
+ */
+
+ /**
+ * @brief Calculate the least common multiple of two numbers
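+   *
+   * For example, `Lcm<6,4>::value` evaluates to 12 (a usage sketch; the
+   * class is deprecated in favour of std::lcm).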
+ */
+ template<long m, long n>
+ struct [[deprecated("Will be removed after Dune 2.8. Use std::lcm instead.")]] Lcm
+ {
+ static void conceptCheck()
+ {
+ static_assert(0<m, "m must be positive!");
+ static_assert(0<n, "n must be positive!");
+ }
+ /**
+ * @brief The least common multiple of the template parameters
+ * m and n.
+ */
+ constexpr static long value = std::lcm(m,n);
+ };
+}
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_LRU_HH
+#define DUNE_COMMON_LRU_HH
+
+#include <list>
+#include <utility>
+#include <map>
+#include <memory>
+
+#include <dune/common/exceptions.hh>
+
+/** @file
+ @author Christian Engwer
+ @brief LRU Cache Container, using an STL like interface
+ */
+
+namespace Dune {
+
+ namespace {
+
+ /*
+ hide the default traits in an empty namespace
+ */
+ template <typename Key, typename Tp,
+ typename Alloc = std::allocator<Tp> >
+ struct _lru_default_traits
+ {
+ typedef Key key_type;
+ typedef Alloc allocator;
+ typedef std::list< std::pair<Key, Tp> > list_type;
+ typedef typename list_type::iterator iterator;
+ typedef typename std::less<key_type> cmp;
+ typedef std::map< key_type, iterator, cmp,
+ typename std::allocator_traits<allocator>::template rebind_alloc<std::pair<const key_type, iterator> > > map_type;
+ };
+
+ } // end empty namespace
+
+ /**
+ @brief LRU Cache Container
+
+ Implementation of an LRU (least recently used) cache
+ container. This implementation follows the approach presented in
+ http://aim.adc.rmit.edu.au/phd/sgreuter/papers/graphite2003.pdf
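+
+     A minimal usage sketch (keys and values are only illustrative):
+     \code
+     Dune::lru<int, std::string> cache;
+     cache.insert(1, "one");   // key 1 becomes the most recently used entry
+     cache.insert(2, "two");
+     cache.touch(1);           // mark key 1 as most recently used again
+     cache.resize(1);          // drops the least recently used entry (key 2)
+     \endcode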
+ */
+ template <typename Key, typename Tp,
+ typename Traits = _lru_default_traits<Key, Tp> >
+ class lru
+ {
+ typedef typename Traits::list_type list_type;
+ typedef typename Traits::map_type map_type;
+ typedef typename Traits::allocator allocator;
+ typedef typename map_type::iterator map_iterator;
+ typedef typename map_type::const_iterator const_map_iterator;
+
+ public:
+ typedef typename Traits::key_type key_type;
+ typedef typename allocator::value_type value_type;
+ using pointer = typename allocator::value_type*;
+ using const_pointer = typename allocator::value_type const*;
+ using const_reference = typename allocator::value_type const&;
+ using reference = typename allocator::value_type&;
+ typedef typename allocator::size_type size_type;
+ typedef typename list_type::iterator iterator;
+ typedef typename list_type::const_iterator const_iterator;
+
+ /**
+ * Returns a read/write reference to the data of the most
+ * recently used entry.
+ */
+ reference front()
+ {
+ return _data.front().second;
+ }
+
+ /**
+ * Returns a read-only (constant) reference to the data of the
+ * most recently used entry.
+ */
+ const_reference front() const
+ {
+ return _data.front().second;
+ }
+
+ /**
+ * Returns a read/write reference to the data of the least
+ * recently used entry.
+ */
+ reference back()
+ {
+ return _data.back().second;
+ }
+
+ /**
+ * Returns a read-only (constant) reference to the data of the
+ * least recently used entry.
+ */
+ const_reference back ([[maybe_unused]] int i) const
+ {
+ return _data.back().second;
+ }
+
+
+ /**
+ * @brief Removes the first element.
+ */
+ void pop_front()
+ {
+ key_type k = _data.front().first;
+ _data.pop_front();
+ _index.erase(k);
+ }
+ /**
+ * @brief Removes the last element.
+ */
+ void pop_back()
+ {
+ key_type k = _data.back().first;
+ _data.pop_back();
+ _index.erase(k);
+ }
+
+ /**
+ * @brief Finds the element whose key is k.
+ *
+ * @return iterator
+ */
+ iterator find (const key_type & key)
+ {
+ const map_iterator it = _index.find(key);
+ if (it == _index.end()) return _data.end();
+ return it->second;
+ }
+
+ /**
+ * @brief Finds the element whose key is k.
+ *
+ * @return const_iterator
+ */
+ const_iterator find (const key_type & key) const
+ {
+ const map_iterator it = _index.find(key);
+ if (it == _index.end()) return _data.end();
+ return it->second;
+ }
+
+ /**
+ * @brief Insert a value into the container
+ *
+ * Stores value under key and marks it as most recent. If this key
+ * is already present, the associated data is replaced.
+ *
+ * @param key associated with data
+ * @param data to store
+ *
+ * @return reference of stored data
+ */
+ reference insert (const key_type & key, const_reference data)
+ {
+ std::pair<key_type, value_type> x(key, data);
+ /* insert item as mru */
+ iterator it = _data.insert(_data.begin(), x);
+ /* store index */
+ _index.insert(std::make_pair(key,it));
+
+ return it->second;
+ }
+
+ /**
+ * @copydoc touch
+ */
+ reference insert (const key_type & key)
+ {
+ return touch (key);
+ }
+
+ /**
+ * @brief mark data associated with key as most recent
+ *
+ * @return reference of stored data
+ */
+ reference touch (const key_type & key)
+ {
+ /* query _index for iterator */
+ map_iterator it = _index.find(key);
+ if (it == _index.end())
+ DUNE_THROW(Dune::RangeError,
+ "Failed to touch key " << key << ", it is not in the lru container");
+ /* update _data
+ move it to the front
+ */
+ _data.splice(_data.begin(), _data, it->second);
+ return it->second->second;
+ }
+
+ /**
+ * @brief Retrieve number of entries in the container
+ */
+ size_type size() const
+ {
+ return _data.size();
+ }
+
+ /**
+ * @brief ensure a maximum size of the container
+ *
+     * If new_size is smaller than the current size, the least recently
+     * used elements are dropped until the container holds new_size
+     * entries; otherwise nothing happens. new_size must not exceed the
+     * current size.
+ */
+ void resize(size_type new_size)
+ {
+ assert(new_size <= size());
+
+ while (new_size < size())
+ pop_back();
+ }
+
+    /**
+     * @brief Remove all elements from the container.
+     */
+ void clear()
+ {
+ _data.clear();
+ _index.clear();
+ }
+
+ private:
+ list_type _data;
+ map_type _index;
+
+ };
+
+} // namespace Dune
+
+#endif // DUNE_COMMON_LRU_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_MALLOC_ALLOCATOR_HH
+#define DUNE_MALLOC_ALLOCATOR_HH
+
+#include <exception>
+#include <cstdlib>
+#include <new>
+#include <utility>
+
+/**
+ * @file
+ * @brief Allocators that use malloc/free.
+ */
+namespace Dune
+{
+ /**
+ @ingroup Allocators
+     @brief Allocator implementation which simply calls malloc/free
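+
+     A usage sketch with a standard container:
+     \code
+     std::vector<double, Dune::MallocAllocator<double> > v(10, 0.0);
+     \endcode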
+ */
+ template <class T>
+ class MallocAllocator {
+ public:
+ typedef std::size_t size_type;
+ typedef std::ptrdiff_t difference_type;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T value_type;
+ template <class U> struct rebind {
+ typedef MallocAllocator<U> other;
+ };
+
+ //! create a new MallocAllocator
+ MallocAllocator() noexcept {}
+    //! copy construct from another MallocAllocator, possibly for a different result type
+ template <class U>
+ MallocAllocator(const MallocAllocator<U>&) noexcept {}
+ //! cleanup this allocator
+ ~MallocAllocator() noexcept {}
+
+ pointer address(reference x) const
+ {
+ return &x;
+ }
+ const_pointer address(const_reference x) const
+ {
+ return &x;
+ }
+
+ //! allocate n objects of type T
+ pointer allocate(size_type n,
+ [[maybe_unused]] const void* hint = 0)
+ {
+ if (n > this->max_size())
+ throw std::bad_alloc();
+
+ pointer ret = static_cast<pointer>(std::malloc(n * sizeof(T)));
+ if (!ret)
+ throw std::bad_alloc();
+ return ret;
+ }
+
+ //! deallocate n objects of type T at address p
+ void deallocate(pointer p, [[maybe_unused]] size_type n)
+ {
+ std::free(p);
+ }
+
+ //! max size for allocate
+ size_type max_size() const noexcept
+ {
+ return size_type(-1) / sizeof(T);
+ }
+
+ //! copy-construct an object of type T (i.e. make a placement new on p)
+ void construct(pointer p, const T& val)
+ {
+ ::new((void*)p)T(val);
+ }
+
+ //! construct an object of type T from variadic parameters
+ template<typename ... Args>
+ void construct(pointer p, Args&&... args)
+ {
+ ::new((void *)p)T(std::forward<Args>(args) ...);
+ }
+
+ //! destroy an object of type T (i.e. call the destructor)
+ void destroy(pointer p)
+ {
+ p->~T();
+ }
+ };
+
+ //! check whether allocators are equivalent
+ template<class T>
+ constexpr bool
+ operator==(const MallocAllocator<T> &, const MallocAllocator<T> &)
+ {
+ return true;
+ }
+
+ //! check whether allocators are not equivalent
+ template<class T>
+ constexpr bool
+ operator!=(const MallocAllocator<T> &, const MallocAllocator<T> &)
+ {
+ return false;
+ }
+}
+
+#endif // DUNE_MALLOC_ALLOCATOR_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_MATH_HH
+#define DUNE_MATH_HH
+
+/** \file
+ * \brief Some useful basic math stuff
+ */
+
+#include <cmath>
+#include <complex>
+#include <limits>
+#include <type_traits>
+
+#include <dune/common/typeutilities.hh>
+
+namespace Dune
+{
+
+ /**
+ \brief Standard implementation of MathematicalConstants.
+
+ This implementation will work with all built-in floating point
+ types. It provides
+
+ * e as exp(1.0)
+ * pi as acos(-1.0)
+
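+
+     For example (a usage sketch, via the user-facing MathematicalConstants
+     class below):
+     \code
+     double tau = 2.0 * Dune::MathematicalConstants<double>::pi();
+     \endcode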
+ */
+ template< class T >
+ struct StandardMathematicalConstants
+ {
+ /**
+ * \brief Euler's number
+ */
+ static const T e ()
+ {
+ using std::exp;
+ static const T e = exp( T( 1 ) );
+ return e;
+ }
+
+ /**
+ * \brief Archimedes' constant
+ */
+ static const T pi ()
+ {
+ using std::acos;
+ static const T pi = acos( T( -1 ) );
+ return pi;
+ }
+ };
+
+
+ /**
+ \brief Provides commonly used mathematical constants.
+
+     A struct that is specialized for types representing real or complex
+     numbers. It provides commonly used mathematical constants with the
+     required accuracy for the specified type.
+ */
+ template< class Field >
+ struct MathematicalConstants
+ : public StandardMathematicalConstants<Field>
+ {};
+
+
+ /** \brief Power method for integer exponents
+ *
+ * \note Make sure that Mantissa is a non-integer type when using negative exponents!
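+   *
+   * For example (a usage sketch), `power(2, 10)` yields 1024 and
+   * `power(2.0, -2)` yields 0.25.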
+ */
+ template <class Mantissa, class Exponent>
+ constexpr Mantissa power(Mantissa m, Exponent p)
+ {
+ static_assert(std::numeric_limits<Exponent>::is_integer, "Exponent must be an integer type!");
+
+ auto result = Mantissa(1);
+ auto absp = (p<0) ? -p : p; // This is simply abs, but std::abs is not constexpr
+ for (Exponent i = Exponent(0); i<absp; i++)
+ result *= m;
+
+ if (p<0)
+ result = Mantissa(1)/result;
+
+ return result;
+ }
+
+ //! Calculates the factorial of m at compile time
+ template <int m>
+ struct Factorial
+ {
+ //! factorial stores m!
+ enum { factorial = m * Factorial<m-1>::factorial };
+ };
+
+ //! end of recursion of factorial via specialization
+ template <>
+ struct Factorial<0>
+ {
+ // 0! = 1
+ enum { factorial = 1 };
+ };
+
+
+ //! calculate the factorial of n as a constexpr
+ // T has to be an integral type
+ template<class T>
+ constexpr inline static T factorial(const T& n) noexcept
+ {
+ static_assert(std::numeric_limits<T>::is_integer, "`factorial(n)` has to be called with an integer type.");
+ T fac = 1;
+ for(T k = 0; k < n; ++k)
+ fac *= k+1;
+ return fac;
+ }
+
+ //! calculate the factorial of n as a constexpr
+ template<class T, T n>
+ constexpr inline static auto factorial (std::integral_constant<T, n>) noexcept
+ {
+ return std::integral_constant<T, factorial(n)>{};
+ }
+
+
+ //! calculate the binomial coefficient n over k as a constexpr
+ // T has to be an integral type
+ template<class T>
+ constexpr inline static T binomial (const T& n, const T& k) noexcept
+ {
+ static_assert(std::numeric_limits<T>::is_integer, "`binomial(n, k)` has to be called with an integer type.");
+
+ if( k < 0 || k > n )
+ return 0;
+
+ if (2*k > n)
+ return binomial(n, n-k);
+
+ T bin = 1;
+ for(auto i = n-k; i < n; ++i)
+ bin *= i+1;
+ return bin / factorial(k);
+ }
+
+ //! calculate the binomial coefficient n over k as a constexpr
+ template<class T, T n, T k>
+ constexpr inline static auto binomial (std::integral_constant<T, n>, std::integral_constant<T, k>) noexcept
+ {
+ return std::integral_constant<T, binomial(n, k)>{};
+ }
+
+ template<class T, T n>
+ constexpr inline static auto binomial (std::integral_constant<T, n>, std::integral_constant<T, n>) noexcept
+ {
+ return std::integral_constant<T, (n >= 0 ? 1 : 0)>{};
+ }
+
+
+ //! compute conjugate complex of x
+ // conjugate complex does nothing for non-complex types
+ template<class K>
+ inline K conjugateComplex (const K& x)
+ {
+ return x;
+ }
+
+#ifndef DOXYGEN
+ // specialization for complex
+ template<class K>
+ inline std::complex<K> conjugateComplex (const std::complex<K>& c)
+ {
+ return std::complex<K>(c.real(),-c.imag());
+ }
+#endif
+
+ //! Return the sign of the value
+ template <class T>
+ int sign(const T& val)
+ {
+ return (val < 0 ? -1 : 1);
+ }
+
+
+ namespace Impl {
+ // Returns whether a given type behaves like std::complex<>, i.e. whether
+ // real() and imag() are defined
+ template<class T>
+ struct isComplexLike {
+ private:
+ template<class U>
+ static auto test(U* u) -> decltype(u->real(), u->imag(), std::true_type());
+
+ template<class U>
+ static auto test(...) -> decltype(std::false_type());
+
+ public:
+ static const bool value = decltype(test<T>(0))::value;
+ };
+ } // namespace Impl
+
+ //! namespace for customization of math functions with Dune-Semantics
+ /**
+ You can add overloads for the Dune-semantics of math-functions in this
+ namespace. These overloads will be used by functors like `Dune::isNaN`
+ to implement these functions, and will be preferred over functions found
+ by ADL, or the corresponding functions from the standard (whether they
+     are found by ADL or in the namespace `std`).
+
+ PriorityTag
+ ===========
+
+ There are two predefined priorities:
+
+ <1> provides a default implementation, only applicable if the
+ camelCase-Version of the function (e.g. `isNaN`) can be found via ADL
+ for an argument of type `T`. (Otherwise the overload should not
+ participate in overload resolution.)
+
+ <0> provides a default implementation that forwards the call to the
+ lower-case version of the function (e.g. `isnan`), found via ADL and
+ the namespace `std`.
+
+ Any higher priority up to 10 can be used by other overloads.
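+
+     A sketch of registering an overload for a hypothetical user-defined
+     number type `MyNumber` (assumed to provide a member function `isNaN()`):
+
+     \code
+     namespace Dune {
+       namespace MathOverloads {
+         auto isNaN(const MyNumber &x, PriorityTag<3>, ADLTag) {
+           return x.isNaN();
+         }
+       }
+     }
+     \endcode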
+ */
+ namespace MathOverloads {
+
+ //! Tag to make sure the functions in this namespace can be found by ADL.
+ struct ADLTag {};
+
+#define DUNE_COMMON_MATH_ISFUNCTION(function, stdfunction) \
+ template<class T> \
+ auto function(const T &t, PriorityTag<1>, ADLTag) \
+ -> decltype(function(t)) { \
+ return function(t); \
+ } \
+ template<class T> \
+ auto function(const T &t, PriorityTag<0>, ADLTag) { \
+ using std::stdfunction; \
+ return stdfunction(t); \
+ } \
+ static_assert(true, "Require semicolon to unconfuse editors")
+
+ DUNE_COMMON_MATH_ISFUNCTION(isNaN,isnan);
+ DUNE_COMMON_MATH_ISFUNCTION(isInf,isinf);
+ DUNE_COMMON_MATH_ISFUNCTION(isFinite,isfinite);
+#undef DUNE_COMMON_MATH_ISFUNCTION
+
+ template<class T>
+ auto isUnordered(const T &t1, const T &t2, PriorityTag<1>, ADLTag)
+ -> decltype(isUnordered(t1, t2)) {
+ return isUnordered(t1, t2);
+ }
+
+ template<class T>
+ auto isUnordered(const T &t1, const T &t2, PriorityTag<0>, ADLTag) {
+ using std::isunordered;
+ return isunordered(t1, t2);
+ }
+ }
+
+ namespace MathImpl {
+
+ // NOTE: it is important that these functors have names different from the
+ // names of the functions they are forwarding to. Otherwise the
+ // unqualified call would find the functor type, not a function, and ADL
+ // would never be attempted.
+#define DUNE_COMMON_MATH_ISFUNCTION_FUNCTOR(function) \
+ struct function##Impl { \
+ template<class T> \
+ constexpr auto operator()(const T &t) const { \
+ return function(t, PriorityTag<10>{}, MathOverloads::ADLTag{}); \
+ } \
+ }; \
+ static_assert(true, "Require semicolon to unconfuse editors")
+
+ DUNE_COMMON_MATH_ISFUNCTION_FUNCTOR(isNaN);
+ DUNE_COMMON_MATH_ISFUNCTION_FUNCTOR(isInf);
+ DUNE_COMMON_MATH_ISFUNCTION_FUNCTOR(isFinite);
+#undef DUNE_COMMON_MATH_ISFUNCTION_FUNCTOR
+
+ struct isUnorderedImpl {
+ template<class T>
+ constexpr auto operator()(const T &t1, const T &t2) const {
+ return isUnordered(t1, t2, PriorityTag<10>{}, MathOverloads::ADLTag{});
+ }
+ };
+
+ } //MathImpl
+
+
+ namespace Impl {
+ /* This helper has a math functor as a static constexpr member. Doing
+ this as a static member of a template struct means we can do this
+       without violating the ODR or putting the definition into a separate
+       compilation unit, while still ensuring the functor is the same
+ lvalue across all compilation units.
+ */
+ template<class T>
+ struct MathDummy
+ {
+ static constexpr T value{};
+ };
+
+ template<class T>
+ constexpr T MathDummy<T>::value;
+
+ } //namespace Impl
+
+ namespace {
+ /* Provide the math functors directly in the `Dune` namespace.
+
+ This actually declares a different name in each translation unit, but
+ they all resolve to the same lvalue.
+ */
+
+    //! check whether the argument is NaN
+ /**
+ * Dune-Semantic: for multi-valued types (complex, vectors), check whether
+ * *any* value is NaN.
+ */
+ constexpr auto const &isNaN = Impl::MathDummy<MathImpl::isNaNImpl>::value;
+
+    //! check whether the argument is infinite or NaN
+ /**
+ * Dune-Semantic: for multi-valued types (complex, vectors), check whether
+ * *any* value is infinite or NaN.
+ */
+ constexpr auto const &isInf = Impl::MathDummy<MathImpl::isInfImpl>::value;
+
+    //! check whether the argument is finite and non-NaN
+ /**
+ * Dune-Semantic: for multi-valued types (complex, vectors), check whether
+ * *all* values are finite and non-NaN.
+ */
+ constexpr auto const &isFinite = Impl::MathDummy<MathImpl::isFiniteImpl>::value;
+
+    //! check whether the arguments are ordered
+ /**
+ * Dune-Semantic: for multi-valued types (complex, vectors), there is
+ * never an ordering, so at the moment these types are not supported as
+ * arguments.
+ */
+ constexpr auto const &isUnordered = Impl::MathDummy<MathImpl::isUnorderedImpl>::value;
+ }
+
+ namespace MathOverloads {
+ /*Overloads for complex types*/
+ template<class T, class = std::enable_if_t<Impl::isComplexLike<T>::value> >
+ auto isNaN(const T &t, PriorityTag<2>, ADLTag) {
+ return Dune::isNaN(real(t)) || Dune::isNaN(imag(t));
+ }
+
+ template<class T, class = std::enable_if_t<Impl::isComplexLike<T>::value> >
+ auto isInf(const T &t, PriorityTag<2>, ADLTag) {
+ return Dune::isInf(real(t)) || Dune::isInf(imag(t));
+ }
+
+ template<class T, class = std::enable_if_t<Impl::isComplexLike<T>::value> >
+ auto isFinite(const T &t, PriorityTag<2>, ADLTag) {
+ return Dune::isFinite(real(t)) && Dune::isFinite(imag(t));
+ }
+ } //MathOverloads
+}
+
+#endif // #ifndef DUNE_MATH_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_MATVECTRAITS_HH
+#define DUNE_MATVECTRAITS_HH
+
+/** \file
+ * \brief Documentation of the traits classes you need to write for each implementation of DenseVector or DenseMatrix
+ */
+
+namespace Dune {
+
+ /**
+ @addtogroup DenseMatVec
+ \brief Type Traits to retrieve types associated with an implementation of Dune::DenseVector or Dune::DenseMatrix
+
+     You have to specialize this class for every implementation of DenseVector or DenseMatrix.
+
+ \code
+ //! export the type of the derived class (e.g. FieldVector<K,SIZE>)
+ typedef ... derived_type;
+ //! export the type of the stored values
+ typedef ... value_type;
+ //! export the type representing the size information
+ typedef ... size_type;
+ \endcode
+
+ */
+ template<class T>
+ struct DenseMatVecTraits {};
+
+} // end namespace Dune
+
+#endif // DUNE_MATVECTRAITS_HH
--- /dev/null
+/* This file determines the order in which things appear in the doxygen
+   documentation within the dune-common module. It works like this:
+
+   @defgroup commands appear only in this file, which is
+   parsed before the other files (because it is mentioned first
+   in the Doxyfile).
+
+ Only @addtogroup is used in the code documentation.
+*/
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_OVERLOADSET_HH
+#define DUNE_COMMON_OVERLOADSET_HH
+
+#include <utility>
+#include <type_traits>
+#include <dune/common/typetraits.hh>
+
+namespace Dune {
+
+namespace Impl {
+
+ template<typename... F>
+ class OverloadSet
+ : public F...
+ {
+
+ public:
+
+ template<typename... FF>
+ OverloadSet(FF&&... ff)
+ : F(std::forward<FF>(ff))...
+ {}
+
+ using F::operator()...;
+
+ };
+
+} // end namespace Impl
+
+
+
+/**
+ * \brief Create an overload set
+ *
+ * \tparam F List of function object types
+ * \param f List of function objects
+ *
+ * This returns an object that contains all
+ * operator() implementations of the passed
+ * functions. All those are available when
+ * calling operator() of the returned object.
+ *
+ * The returned object derives from
+ * those implementations such that it contains
+ * all operator() implementations in its
+ * overload set. When calling operator()
+ * this will select the best overload.
+ * If multiple overloads are equally good, the
+ * call is ambiguous.
+ *
+ * Notice that the passed function objects are
+ * stored by value and must be copy-constructible.
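+ *
+ * A short usage sketch:
+ * \code
+ * auto f = Dune::overload(
+ *   [](int i)                { return i + 1; },
+ *   [](const std::string& s) { return s.size(); });
+ * f(41);                   // picks the int overload
+ * f(std::string("dune"));  // picks the std::string overload
+ * \endcode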
+ *
+ * \ingroup CxxUtilities
+ */
+template<class... F>
+auto overload(F&&... f)
+{
+ return Impl::OverloadSet<std::decay_t<F>...>(std::forward<F>(f)...);
+}
+
+
+
+namespace Impl {
+
+ template<class F0, class... F>
+ class OrderedOverloadSet: public OrderedOverloadSet<F...>, F0
+ {
+ using Base = OrderedOverloadSet<F...>;
+ public:
+
+ template<class FF0, class... FF>
+ OrderedOverloadSet(FF0&& f0, FF&&... ff) :
+ Base(std::forward<FF>(ff)...),
+ F0(std::forward<FF0>(f0))
+ {}
+
+ // Forward to operator() of F0 if it can be called with the given arguments.
+ template<class... Args,
+ std::enable_if_t<IsCallable<F0(Args&&...)>::value, int> = 0>
+ decltype(auto) operator()(Args&&... args)
+ {
+ return F0::operator()(std::forward<Args>(args)...);
+ }
+
+ // Forward to operator() of base class if F0 cannot be called with the given
+ // arguments. In this case the base class will successively try operator()
+ // of all F... .
+ template<class... Args,
+ std::enable_if_t<not IsCallable<F0(Args&&...)>::value, int> = 0>
+ decltype(auto) operator()(Args&&... args)
+ {
+ return Base::operator()(std::forward<Args>(args)...);
+ }
+
+ };
+
+ template<class F0>
+ class OrderedOverloadSet<F0>: public F0
+ {
+ public:
+
+ template<class FF0>
+ OrderedOverloadSet(FF0&& f0) :
+ F0(std::forward<FF0>(f0))
+ {}
+
+ // Forward to operator() of F0. If it cannot be called with
+ // the given arguments a static assertion will fail.
+ template<class... Args>
+ decltype(auto) operator()(Args&&... args)
+ {
+ static_assert(IsCallable<F0(Args&&...)>::value,
+ "No matching overload found in OrderedOverloadSet");
+ return F0::operator()(std::forward<Args>(args)...);
+ }
+ };
+
+} // end namespace Impl
+
+
+
+/**
+ * \brief Create an ordered overload set
+ *
+ * \tparam F List of function object types
+ * \param f List of function objects
+ *
+ * This returns an object that contains all
+ * operator() implementations of the passed
+ * functions. All those are available when
+ * calling operator() of the returned object.
+ *
+ * In contrast to overload() these overloads
+ * are ordered in the sense that the first
+ * matching overload for the given arguments
+ * is selected and later ones are ignored.
+ * Hence such a call is never ambiguous.
+ *
+ * Notice that the passed function objects are
+ * stored by value and must be copy-constructible.
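+ *
+ * A short sketch illustrating the ordering:
+ * \code
+ * auto f = Dune::orderedOverload(
+ *   [](int)    { return 1; },
+ *   [](auto&&) { return 2; });
+ * f(0);    // returns 1: the first overload matches and wins
+ * f("x");  // returns 2: the call falls through to the generic overload
+ * \endcode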
+ *
+ * \ingroup CxxUtilities
+ */
+template<class... F>
+auto orderedOverload(F&&... f)
+{
+ return Impl::OrderedOverloadSet<std::decay_t<F>...>(std::forward<F>(f)...);
+}
+
+
+
+} // end namespace Dune
+
+#endif // DUNE_COMMON_OVERLOADSET_HH
--- /dev/null
+add_subdirectory(test)
+add_subdirectory(benchmark)
+
+#install headers
+install(FILES
+ collectivecommunication.hh
+ communication.hh
+ communicator.hh
+ indexset.hh
+ indicessyncer.hh
+ interface.hh
+ localindex.hh
+ mpicollectivecommunication.hh
+ mpicommunication.hh
+ mpiguard.hh
+ future.hh
+ mpifuture.hh
+ mpidata.hh
+ mpipack.hh
+ mpihelper.hh
+ mpitraits.hh
+ plocalindex.hh
+ remoteindices.hh
+ selection.hh
+ variablesizecommunicator.hh
+ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dune/common/parallel)
--- /dev/null
+add_executable(mpi_collective_benchmark EXCLUDE_FROM_ALL mpi_collective_benchmark.cc)
+dune_target_link_libraries(mpi_collective_benchmark PUBLIC "dunecommon")
+add_dune_mpi_flags(mpi_collective_benchmark)
+
+configure_file(options.ini options.ini COPYONLY)
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+/**
+ * @brief Benchmark for measuring the possible overlap of computation
+ * and communication in MPI collective communications.
+ *
+ * This benchmark is inspired by the Sandia micro benchmark:
+ * W. Lawry, C. Wilson, A. Maccabe, R. Brightwell. COMB: A Portable Benchmark
+ * Suite for Assessing MPI Overlap. In Proceedings of the IEEE International
+ * Conference on Cluster Computing (CLUSTER 2002), p. 472, 2002.
+ * http://www.cs.sandia.gov/smb/overhead.html
+ *
+ * The following communication times are measured:
+ * Blocking: Blocking call. E.g. MPI_Allreduce
+ * Nonblocking_wait (NB_Wait): Nonblocking (e.g. MPI_Iallreduce) call
+ * directly followed by MPI_Wait.
+ * Nonblocking_sleep (NB_Sleep): Nonblocking call followed by a busy wait
+ * until the work time has passed. Then
+ * MPI_Wait.
+ * Nonblocking_active (NB_active): Nonblocking call followed by a busy wait
+ *                                 where in every iteration MPI_Test is
+ *                                 called until the work time has passed.
+ *                                 Then MPI_Wait.
+ *
+ * The overhead is computed as the time for the Nonblocking call plus
+ * the time for MPI_Wait. The iteration time is the time for the whole
+ * communication. The available part of the communication
+ * time (avail (%)) is computed as 1 - (overhead/base_t), where base_t is
+ * the time for calling the method with wait time = 0. The overhead is
+ * determined by increasing the work time successively until it is the
+ * dominant factor in the iteration time. Then the overhead is
+ * computed as iter_t-work_t.
+ *
+ * Usage: mpirun ./mpi_collective_benchmark [options]
+ *
+ * options:
+ * -method: default: allreduce.
+ * possible methods: allreduce, barrier,
+ * broadcast, gather, allgather, scatter
+ * -iterations: default: 1000. Number of iterations used to
+ *              measure the time for one communication
+ * -allMethods: default: 0. If 1, iterates over all available methods
+ * -startSize: default: n, where n is the size of MPI_COMM_WORLD. runs
+ * the benchmark for different communicator sizes, starting with
+ * startSize. After every run the size is doubled. Finally one run is
+ * made for the whole communicator.
+ * -verbose: default: 0. If 1 prints intermediate information while determining
+ * the overhead.
+ * -threshold: default: 2. The threshold when the work time is the dominant
+ * factor in the iteration time. (Similar to the threshold in the
+ * sandia benchmark)
+ * -nohdr: default: 0. Suppress output of the header.
+ *
+ * Options can be set either in the options.ini file or passed on
+ * the command line (-key value).
+ *
+ * To get a good 'available' value for the NB_sleep communication, some
+ * MPI implementations need to spawn an extra thread. With MPICH you
+ * can activate this by setting the environment variable
+ * MPI_ASYNC_PROGRESS to 1, with IntelMPI the variable is called
+ * I_MPI_ASYNC_PROGRESS.
+ * (https://software.intel.com/en-us/mpi-developer-reference-linux-asynchronous-progress-control)
+ */
+
+
+#include <config.h>
+
+#include <iostream>
+#include <iomanip>
+#include <thread>
+
+#include <dune/common/parallel/mpihelper.hh>
+#include <dune/common/timer.hh>
+#include <dune/common/parametertree.hh>
+#include <dune/common/parametertreeparser.hh>
+
+Dune::ParameterTree options;
+std::vector<std::string> all_methods = {"allreduce",
+ "barrier",
+ "broadcast",
+ "gather",
+ "allgather",
+ "scatter"};
+
+template<class CC>
+void communicate(CC& cc){
+ auto method = options.get("method", "allreduce");
+ std::vector<int> data(1, 42);
+ if(method == "allreduce"){
+ cc.template allreduce<std::plus<int>>(data);
+ return;
+ }
+ if(method == "barrier"){
+ cc.barrier();
+ return;
+ }
+ if(method == "broadcast"){
+ cc.broadcast(data.data(), data.size(), 0);
+ return;
+ }
+ if(method == "gather"){
+ std::vector<int> recv_data(cc.size(), 0);
+ cc.gather(data.data(), recv_data.data(), 1, 0);
+ return;
+ }
+ if(method == "allgather"){
+ std::vector<int> recv_data(cc.size(), 0);
+ cc.allgather(data.data(), 1, recv_data.data());
+ return;
+ }
+ if(method == "scatter"){
+ std::vector<int> send_data(cc.size(), 42);
+ cc.scatter(send_data.data(), data.data(), 1, 0);
+ return;
+ }
+ DUNE_THROW(Dune::Exception, "Unknown method");
+}
+
+template<class CC>
+Dune::Future<void> startCommunication(CC& cc){
+ auto method = options.get("method", "allreduce");
+ if(method == "allreduce"){
+ return cc.template iallreduce<std::plus<char>>(42);
+ }
+ if(method == "barrier"){
+ return cc.ibarrier();
+ }
+ if(method == "broadcast"){
+ return cc.ibroadcast(42, 0);
+ }
+ if(method == "gather"){
+ return cc.igather(42, std::vector<int>(cc.size()), 0);
+ }
+ if(method == "allgather"){
+ return cc.iallgather(42, std::vector<int>(cc.size()));
+ }
+ if(method == "scatter"){
+ return cc.iscatter(std::vector<int>(cc.size(), 42), 0, 0);
+ }
+ DUNE_THROW(Dune::Exception, "Unknown method");
+}
+
+template<class CC>
+double runBlocking(CC& cc){
+ std::vector<char> answer(1, 42);
+ int iterations = options.get("iterations", 1000);
+ Dune::Timer watch;
+ for(int i = 0; i < iterations; i++){
+ cc.barrier();
+ watch.start();
+ communicate(cc);
+ watch.stop();
+ }
+ return cc.sum(watch.elapsed())/iterations/cc.size();
+}
+
+template<class CC>
+double runNonblockingWait(CC& cc){
+ std::vector<char> answer(1, 42);
+ Dune::Timer watch;
+ int iterations = options.get("iterations", 1000);
+ for(int i = 0; i < iterations; i++){
+ cc.barrier();
+ watch.start();
+ auto f = startCommunication(cc);
+ f.wait();
+ watch.stop();
+ }
+ return cc.sum(watch.elapsed())/iterations/cc.size();
+}
+
+std::tuple<double, double> runNonblockingSleep(decltype(Dune::MPIHelper::getCommunication())& cc, std::chrono::duration<double> wait_time){
+ std::vector<char> answer(1, 42);
+ Dune::Timer watch, watch_work;
+ int iterations = options.get("iterations", 1000);
+ for(int i = 0; i < iterations; i++){
+ cc.barrier();
+ watch.start();
+ auto f = startCommunication(cc);
+ watch_work.start();
+ auto start_time = std::chrono::high_resolution_clock::now();
+ while(std::chrono::high_resolution_clock::now()-start_time < wait_time);
+ watch_work.stop();
+ f.wait();
+ watch.stop();
+ }
+ return std::tuple<double, double>(cc.sum(watch.stop())/iterations/cc.size(),
+ cc.sum(watch_work.stop())/iterations/cc.size());
+}
+
+std::tuple<double, double> runNonblockingActive(decltype(Dune::MPIHelper::getCommunication())& cc, std::chrono::duration<double> wait_time){
+ std::vector<char> answer(1, 42);
+ int iterations = options.get("iterations", 1000);
+ Dune::Timer watch, watch_work;
+ for(int i = 0; i < iterations; i++){
+ cc.barrier();
+ watch.start();
+ auto f = startCommunication(cc);
+ watch_work.start();
+ auto start_time = std::chrono::high_resolution_clock::now();
+ while(std::chrono::high_resolution_clock::now()-start_time < wait_time)
+ f.ready();
+ watch_work.stop();
+ f.wait();
+ watch.stop();
+ }
+ // return the time spent in the communication methods
+ return std::tuple<double, double>(cc.sum(watch.stop())/iterations/cc.size(),
+ cc.sum(watch_work.stop())/iterations/cc.size());
+}
+
+/* Increases the work until it is the dominant factor in the iteration
+ time. Returns the base time and how much of it is available for
+ computation (%), which is computed with the formula 1-(overhead/base_t).
+ */
+std::tuple<double, double> determineOverlap(std::function<std::tuple<double, double>(std::chrono::duration<double>)> fun)
+{
+ double base_t = 0;
+ std::tie(base_t, std::ignore) = fun(std::chrono::duration<double>(0));
+ if(options.get("verbose", 0))
+ std::cout << std::endl << std::endl << std::setw(12) << "base_t:" << base_t << std::endl;
+ double iter_t = 0;
+ double work_t = 0;
+ int i = 1;
+ double iter_t_threshold = options.get("threshold", 2.0);
+ for(double work = 0.25*base_t; iter_t < iter_t_threshold*base_t; work *= 2, i++){
+ std::tie(iter_t, work_t) = fun(std::chrono::duration<double>(work));
+ if(options.get("verbose", 0))
+ std::cout << i << std::setw(12) << " iter_t:" << std::setw(12) << iter_t
+ << std::setw(12) << " work_t:" << std::setw(12) << work_t << std::endl;
+ }
+ double overhead = iter_t-work_t;
+ double avail = 1.0-overhead/base_t;
+ if(options.get("verbose", 0))
+ std::cout << std::setw(12) << " ovhd:" << std::setw(12) << overhead
+ << std::setw(12) << " available:" << std::setw(12) << avail << std::endl;
+ return std::tuple<double, double>(base_t, avail);
+}
+
+void printHeader(){
+ if(options.get("nohdr", 0) == 0){
+ std::cout << "Method: " << options.get("method", "allreduce") << std::endl;
+ std::cout << std::scientific;
+ std::cout << std::setw(10) << "commsize"
+ << std::setw(12) << "iterations"
+ << std::setw(16) << "Blocking"
+ << std::setw(16) << "NB_wait"
+ << std::setw(16) << "NB_sleep"
+ << std::setw(12) << "avail(%)"
+ << std::setw(16) << "NB_active"
+ << std::setw(12) << "avail(%)"
+ << std::endl;
+ }
+}
+
+void run(int s){
+ auto comm_world = Dune::MPIHelper::getCommunication();
+ Dune::MPIHelper::MPICommunicator comm;
+ #if HAVE_MPI
+ MPI_Comm_split(comm_world, comm_world.rank() < s, comm_world.rank(), &comm);
+ #endif
+ if(comm_world.rank() < s){
+ Dune::Communication<Dune::MPIHelper::MPICommunicator> cc(comm);
+ std::cout << std::setw(10) << cc.size()
+ << std::setw(12) << options.get("iterations", 1000) << std::flush;
+
+ double blocking_t = runBlocking(cc);
+ std::cout << std::setw(16) << blocking_t << std::flush;
+
+ double nb_wait_t = runNonblockingWait(cc);
+ std::cout << std::setw(16) << nb_wait_t << std::flush;
+
+ using namespace std::placeholders;
+ auto nb_sleep = std::bind(runNonblockingSleep, std::ref(cc), _1);
+ double nb_sleep_t, nb_sleep_avail;
+ std::tie(nb_sleep_t, nb_sleep_avail) = determineOverlap(nb_sleep);
+ std::cout << std::setw(16) << nb_sleep_t
+ << std::setw(12) << std::fixed << std::setprecision(2) << 100*nb_sleep_avail
+ << std::scientific << std::setprecision(6) << std::flush;
+
+ auto nb_active = std::bind(runNonblockingActive, cc, _1);
+ double nb_active_t, nb_active_avail;
+ std::tie(nb_active_t, nb_active_avail) = determineOverlap(nb_active);
+ std::cout << std::setw(16) << nb_active_t
+ << std::setw(12) << std::fixed << std::setprecision(2) << 100*nb_active_avail
+ << std::scientific << std::setprecision(6) << std::endl;
+ }
+}
+
+int main(int argc, char** argv){
+ Dune::MPIHelper& mpihelper = Dune::MPIHelper::instance(argc, argv);
+
+ // disable output on almost all ranks
+ if(mpihelper.rank() != 0)
+ std::cout.setstate(std::ios_base::failbit);
+ // parse options
+ Dune::ParameterTreeParser::readINITree("options.ini", options);
+ Dune::ParameterTreeParser::readOptions(argc, argv, options);
+
+ std::vector<std::string> methods = {options.get("method", "allreduce")};
+ if(options.get("allMethods", 0) == 1)
+ methods = std::vector<std::string>(all_methods);
+ for(std::string method : methods){
+ options["method"] = method;
+ std::cout << std::left << std::scientific;
+ printHeader();
+ int s = options.get("startSize", mpihelper.size());
+ while(s < mpihelper.size()){
+ run(s);
+ s *= 2;
+ }
+ run(mpihelper.size());
+ }
+ return 0;
+}
--- /dev/null
+iterations = 10000
+method = "allreduce"
+allMethods = 0
+threshold = 2.0
+# startSize = 1
\ No newline at end of file
--- /dev/null
+// Will be removed after the 2.7 release
+#warning "Deprecated header, use #include <dune/common/parallel/communication.hh> instead!"
+#include <dune/common/parallel/communication.hh>
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_PARALLEL_COMMUNICATION_HH
+#define DUNE_COMMON_PARALLEL_COMMUNICATION_HH
+/*!
+ \file
+ \brief Implements a utility class that provides
+ collective communication methods for sequential programs.
+
+ \ingroup ParallelCommunication
+ */
+#include <iostream>
+#include <complex>
+#include <algorithm>
+#include <vector>
+
+#include <dune/common/binaryfunctions.hh>
+#include <dune/common/exceptions.hh>
+#include <dune/common/parallel/future.hh>
+
+/*! \defgroup ParallelCommunication Parallel Communication
+ \ingroup Common
+
+ \brief Abstractions for parallel computing
+
+ Dune offers an abstraction to the basic methods of parallel
+ communication. It allows one to switch parallel features on and off,
+ without changing the code. This is done using either Communication
+ or MPICommunication.
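+
+ A minimal illustrative sketch (assuming dune/common/parallel/mpihelper.hh
+ is included and MPI has been initialized via MPIHelper, as in any Dune
+ program):
+ \code
+ auto comm = Dune::MPIHelper::getCommunication();
+ double nprocs = comm.sum(1.0); // every rank contributes 1.0
+ std::cout << "rank " << comm.rank() << " of " << comm.size() << std::endl;
+ \endcode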
+
+ */
+
+/*!
+ \file
+ \brief An abstraction to the basic methods of parallel communication,
+ following the message-passing paradigm.
+ \ingroup ParallelCommunication
+ */
+
+namespace Dune
+{
+
+ /* define some type that definitely differs from MPI_Comm */
+ struct No_Comm {};
+
+ /*! @brief Comparison operator for MPI compatibility
+
+ Always returns true.
+ */
+ inline bool operator==(const No_Comm&, const No_Comm&)
+ {
+ return true;
+ }
+
+ /*! @brief Comparison operator for MPI compatibility
+
+ Always returns false.
+ */
+ inline bool operator!=(const No_Comm&, const No_Comm&)
+ {
+ return false;
+ }
+
+ /*! @brief Collective communication interface and sequential default implementation
+
+ Communication offers an abstraction to the basic methods
+ of parallel communication, following the message-passing
+ paradigm. It allows one to switch parallel features on and off, without
+ changing the code. Currently only MPI and sequential code are
+ supported.
+
+ A Communication object is returned by all grids (also
+ the sequential ones) in order to allow code to be written in
+ a transparent way for sequential and parallel grids.
+
+ This class provides a default implementation for sequential grids.
+ The number of processes involved is 1, any sum, maximum, etc. returns
+ just its input argument and so on.
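+
+ For example (illustrative), with the sequential default implementation:
+ \code
+ Dune::Communication<Dune::No_Comm> comm;
+ int x = comm.sum(5); // x == 5, there is only one process
+ comm.broadcast(&x, 1, 0); // no-op, returns 0 (MPI_SUCCESS)
+ \endcode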
+
+ In specializations one can implement the real thing using appropriate
+ communication functions, e.g. there exists an implementation using
+ the Message Passing %Interface (MPI), see Dune::Communication<MPI_Comm>.
+
+ Moreover, the communication subsystem used by an implementation
+ is not visible in the interface, i.e. Dune grid implementations
+ are not restricted to MPI.
+
+ \tparam Communicator The communicator type used by your message-passing implementation.
+ For MPI this will be MPI_Comm. For sequential codes there is the dummy communicator No_Comm.
+ It is assumed that if you want to specialize the Communication class for a
+ message-passing system other than MPI, that message-passing system will have something
+ equivalent to MPI communicators.
+
+ \ingroup ParallelCommunication
+ */
+ template<typename Communicator>
+ class Communication
+ {
+ public:
+ //! Construct default object
+ Communication()
+ {}
+
+ /** \brief Constructor with a given communicator
+ *
+ * As this is the implementation for the sequential setting, the communicator is a dummy and simply discarded.
+ */
+ Communication (const Communicator&)
+ {}
+
+ //! Return rank, is between 0 and size()-1
+ int rank () const
+ {
+ return 0;
+ }
+
+ //! cast to the underlying Fake MPI communicator
+ operator No_Comm() const
+ {
+ return {};
+ }
+
+ //! Number of processes in set, is greater than 0
+ int size () const
+ {
+ return 1;
+ }
+
+ /** @brief Sends the data to the dest_rank
+ @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
+ */
+ template<class T>
+ int send([[maybe_unused]] const T& data,
+ [[maybe_unused]] int dest_rank,
+ [[maybe_unused]] int tag)
+ {
+ DUNE_THROW(ParallelError, "This method is not supported in sequential programs");
+ }
+
+ /** @brief Sends the data to the dest_rank nonblocking
+ @returns Future<T> containing the send buffer, completes when the data has been sent
+ */
+ template<class T>
+ PseudoFuture<T> isend([[maybe_unused]] const T&& data,
+ [[maybe_unused]] int dest_rank,
+ [[maybe_unused]] int tag)
+ {
+ DUNE_THROW(ParallelError, "This method is not supported in sequential programs");
+ }
+
+ /** @brief Receives the data from the source_rank
+ @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
+ */
+ template<class T>
+ T recv([[maybe_unused]] T&& data,
+ [[maybe_unused]] int source_rank,
+ [[maybe_unused]] int tag,
+ [[maybe_unused]] void* status = 0)
+ {
+ DUNE_THROW(ParallelError, "This method is not supported in sequential programs");
+ }
+
+ /** @brief Receives the data from the source_rank nonblocking
+ @returns Future<T> containing the received data when complete
+ */
+ template<class T>
+ PseudoFuture<T> irecv([[maybe_unused]] T&& data,
+ [[maybe_unused]] int source_rank,
+ [[maybe_unused]] int tag)
+ {
+ DUNE_THROW(ParallelError, "This method is not supported in sequential programs");
+ }
+
+ template<class T>
+ T rrecv([[maybe_unused]] T&& data,
+ [[maybe_unused]] int source_rank,
+ [[maybe_unused]] int tag,
+ [[maybe_unused]] void* status = 0) const
+ {
+ DUNE_THROW(ParallelError, "This method is not supported in sequential programs");
+ }
+ /** @brief Compute the sum of the argument over all processes and
+ return the result in every process. Assumes that T has an operator+
+ */
+ template<typename T>
+ T sum (const T& in) const
+ {
+ return in;
+ }
+
+ /** @brief Compute the sum over all processes for each component of an array and return the result
+ in every process. Assumes that T has an operator+
+
+ @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
+ */
+ template<typename T>
+ int sum ([[maybe_unused]] T* inout, [[maybe_unused]] int len) const
+ {
+ return 0;
+ }
+
+ /** @brief Compute the product of the argument over all processes and
+ return the result in every process. Assumes that T has an operator*
+ */
+ template<typename T>
+ T prod (const T& in) const
+ {
+ return in;
+ }
+
+ /** @brief Compute the product over all processes
+ for each component of an array and return the result
+ in every process. Assumes that T has an operator*
+ @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
+ */
+ template<typename T>
+ int prod ([[maybe_unused]] T* inout, [[maybe_unused]] int len) const
+ {
+ return 0;
+ }
+
+ /** @brief Compute the minimum of the argument over all processes and
+ return the result in every process. Assumes that T has an operator<
+ */
+ template<typename T>
+ T min (const T& in) const
+ {
+ return in;
+ }
+
+ /** @brief Compute the minimum over all processes
+ for each component of an array and return the result
+ in every process. Assumes that T has an operator<
+ @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
+ */
+ template<typename T>
+ int min ([[maybe_unused]] T* inout, [[maybe_unused]] int len) const
+ {
+ return 0;
+ }
+
+ /** @brief Compute the maximum of the argument over all processes and
+ return the result in every process. Assumes that T has an operator<
+ */
+ template<typename T>
+ T max (const T& in) const
+ {
+ return in;
+ }
+
+ /** @brief Compute the maximum over all processes
+ for each component of an array and return the result
+ in every process. Assumes that T has an operator<
+ @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
+ */
+ template<typename T>
+ int max ([[maybe_unused]] T* inout, [[maybe_unused]] int len) const
+ {
+ return 0;
+ }
+
+ /** @brief Wait until all processes have arrived at this point in the program.
+ @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
+ */
+ int barrier () const
+ {
+ return 0;
+ }
+
+ /** @brief Nonblocking barrier
+ @returns Future<void> which is complete when all processes have reached the barrier
+ */
+ PseudoFuture<void> ibarrier () const
+ {
+ return {true}; // return a valid future
+ }
+
+ /** @brief Distribute an array from the process with rank root to all other processes
+ @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
+ */
+ template<typename T>
+ int broadcast ([[maybe_unused]] T* inout,
+ [[maybe_unused]] int len,
+ [[maybe_unused]] int root) const
+ {
+ return 0;
+ }
+
+ /** @brief Distribute an array from the process with rank root to all other processes nonblocking
+ @returns Future<T> containing the distributed data
+ */
+ template<class T>
+ PseudoFuture<T> ibroadcast(T&& data, int root) const{
+ return {std::forward<T>(data)};
+ }
+
+
+ /** @brief Gather arrays on root task.
+ *
+ * Each process sends its in array of length len to the root process
+ * (including the root itself). In the root process these arrays are stored in rank
+ * order in the out array which must have size len * number of processes.
+ * @param[in] in The send buffer with the data to send.
+ * @param[out] out The buffer to store the received data in. Might have length zero on non-root
+ * tasks.
+ * @param[in] len The number of elements to send on each task.
+ * @param[in] root The root task that gathers the data.
+ * @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
+ */
+ template<typename T>
+ int gather (const T* in, T* out, int len, [[maybe_unused]] int root) const // note out must have same size as in
+ {
+ for (int i=0; i<len; i++)
+ out[i] = in[i];
+ return 0;
+ }
+
+ /** @brief Gather arrays on root task nonblocking
+ @returns Future<TOUT, TIN> containing the gathered data
+ */
+ template<class TIN, class TOUT = std::vector<TIN>>
+ PseudoFuture<TOUT> igather(TIN&& data_in, TOUT&& data_out, int root){
+ *(data_out.begin()) = std::forward<TIN>(data_in);
+ return {std::forward<TOUT>(data_out)};
+ }
+
+
+ /** @brief Gather arrays of variable size on root task.
+ *
+ * Each process sends its in array of length sendDataLen to the root process
+ * (including the root itself). In the root process these arrays are stored in rank
+ * order in the out array.
+ * @param[in] in The send buffer with the data to be sent
+ * @param[in] sendDataLen The number of elements to send on each task
+ * @param[out] out The buffer to store the received data in. May have length zero on non-root
+ * tasks.
+ * @param[in] recvDataLen An array with size equal to the number of processes containing the number
+ * of elements to receive from process i at position i, i.e. the number that
+ * is passed as sendDataLen argument to this function in process i.
+ * May have length zero on non-root tasks.
+ * @param[out] displ An array with size equal to the number of processes. Data received from
+ * process i will be written starting at out+displ[i] on the root process.
+ * May have length zero on non-root tasks.
+ * @param[in] root The root task that gathers the data.
+ * @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
+ */
+ template<typename T>
+ int gatherv (const T* in,
+ int sendDataLen,
+ T* out,
+ [[maybe_unused]] int* recvDataLen,
+ int* displ,
+ [[maybe_unused]] int root) const
+ {
+ for (int i=*displ; i<sendDataLen; i++)
+ out[i] = in[i];
+ return 0;
+ }
+
+ /** @brief Scatter array from a root to all other tasks.
+ *
+ * The root process sends the elements with index from k*len to (k+1)*len-1 in its array to
+ * task k, which stores it at index 0 to len-1.
+ * @param[in] sendData The array to scatter. Might have length zero on non-root
+ * tasks.
+ * @param[out] recvData The buffer to store the received data in. Upon completion of the
+ * method task k will have the k-th block of the root's send
+ * buffer stored there.
+ * @param[in] len The number of elements in the recv buffer.
+ * @param[in] root The root task that scatters the data.
+ * @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
+ */
+ template<typename T>
+ int scatter (const T* sendData, T* recvData, int len, [[maybe_unused]] int root) const // note out must have same size as in
+ {
+ for (int i=0; i<len; i++)
+ recvData[i] = sendData[i];
+ return 0;
+ }
+
+ /** @brief Scatter array from a root to all other tasks nonblocking.
+ * @returns Future<TOUT, TIN> containing the scattered data
+ */
+ template<class TIN, class TOUT = TIN>
+ PseudoFuture<TOUT> iscatter(TIN&& data_in, TOUT&& data_out, int root){
+ data_out = *(std::forward<TIN>(data_in).begin());
+ return {std::forward<TOUT>(data_out)};
+ }
+
+ /** @brief Scatter arrays of variable length from a root to all other tasks.
+ *
+ * The root process sends the elements with index from displ[k] to
+ * displ[k]+sendDataLen[k]-1 of its sendData array to task k, which stores them at index 0 to recvDataLen-1.
+ * @param[in] sendData The array to scatter. May have length zero on non-root
+ * tasks.
+ * @param[in] sendDataLen An array with size equal to the number of processes containing the number
+ * of elements to scatter to process i at position i, i.e. the number that
+ * is passed as recvDataLen argument to this function in process i.
+ * @param[in] displ An array with size equal to the number of processes. Data scattered to
+ * process i will be read starting at sendData+displ[i] on the root process.
+ * @param[out] recvData The buffer to store the received data in. Upon completion of the
+ * method task k will have the k-th portion of the root's send
+ * buffer stored there.
+ * @param[in] recvDataLen The number of elements in the recvData buffer.
+ * @param[in] root The root task that scatters the data.
+ * @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
+ */
+ template<typename T>
+ int scatterv (const T* sendData, int* sendDataLen, int* displ, T* recvData,
+ [[maybe_unused]] int recvDataLen, [[maybe_unused]] int root) const
+ {
+ for (int i=*displ; i<*sendDataLen; i++)
+ recvData[i] = sendData[i];
+ return 0;
+ }
+
+ /**
+ * @brief Gathers data from all tasks and distributes it to all.
+ *
+ * The block of data sent from the jth process is received by every
+ * process and placed in the jth block of the buffer recvbuf.
+ *
+ * @param[in] sbuf The buffer with the data to send. Has to be the same for
+ * each task.
+ * @param[in] count The number of elements to send by any process.
+ * @param[out] rbuf The receive buffer for the data. Has to be of size
+ * notasks*count, with notasks being the number of tasks in the communicator.
+ * @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
+ */
+ template<typename T>
+ int allgather(const T* sbuf, int count, T* rbuf) const
+ {
+ for(const T* end=sbuf+count; sbuf < end; ++sbuf, ++rbuf)
+ *rbuf=*sbuf;
+ return 0;
+ }
+
+ /**
+ * @brief Gathers data from all tasks and distributes it to all, nonblocking.
+ @returns Future<TOUT, TIN> containing the distributed data
+ */
+ template<class TIN, class TOUT = TIN>
+ PseudoFuture<TOUT> iallgather(TIN&& data_in, TOUT&& data_out){
+ return {std::forward<TOUT>(data_out)};
+ }
+
+ /**
+ * @brief Gathers data of variable length from all tasks and distributes it to all.
+ *
+ * The block of data sent from the jth process is received by every
+ * process and placed in the jth block of the buffer out.
+ *
+ * @param[in] in The send buffer with the data to send.
+ * @param[in] sendDataLen The number of elements to send on each task.
+ * @param[out] out The buffer to store the received data in.
+ * @param[in] recvDataLen An array with size equal to the number of processes containing the number
+ * of elements to receive from process i at position i, i.e. the number that
+ * is passed as sendDataLen argument to this function in process i.
+ * @param[in] displ An array with size equal to the number of processes. Data received from
+ * process i will be written starting at out+displ[i].
+ * @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
+ */
+ template<typename T>
+ int allgatherv (const T* in, int sendDataLen, T* out, [[maybe_unused]] int* recvDataLen, int* displ) const
+ {
+ for (int i=*displ; i<sendDataLen; i++)
+ out[i] = in[i];
+ return 0;
+ }
+
+ /**
+ * @brief Compute something over all processes
+ * for each component of an array and return the result
+ * in every process.
+ *
+ * The template parameter BinaryFunction is the type of
+ * the binary function to use for the computation
+ *
+ * @param inout The array to compute on.
+ * @param len The number of components in the array
+ * @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
+ */
+ template<typename BinaryFunction, typename Type>
+ int allreduce([[maybe_unused]] Type* inout, [[maybe_unused]] int len) const
+ {
+ return 0;
+ }
+
+ /**
+ * @brief Compute something over all processes nonblocking
+ @return Future<TOUT, TIN> containing the computed something
+ */
+ template<class BinaryFunction, class TIN, class TOUT = TIN>
+ PseudoFuture<TOUT> iallreduce(TIN&& data_in, TOUT&& data_out){
+ data_out = std::forward<TIN>(data_in);
+ return {std::forward<TOUT>(data_out)};
+ }
+
+ /**
+ * @brief Compute something over all processes nonblocking and in-place
+ @return Future<T> containing the computed something
+ */
+ template<class BinaryFunction, class T>
+ PseudoFuture<T> iallreduce(T&& data){
+ return {std::forward<T>(data)};
+ }
+
+
+ /**
+ * @brief Compute something over all processes
+ * for each component of an array and return the result
+ * in every process.
+ *
+ * The template parameter BinaryFunction is the type of
+ * the binary function to use for the computation
+ *
+ * @param in The array to compute on.
+ * @param out The array to store the results in.
+ * @param len The number of components in the array
+ * @returns MPI_SUCCESS (==0) if successful, an MPI error code otherwise
+ */
+ template<typename BinaryFunction, typename Type>
+ int allreduce(const Type* in, Type* out, int len) const
+ {
+ std::copy(in, in+len, out);
+ return 0;
+ }
+
+ };
+
+ template<class T>
+ using CollectiveCommunication
+ // Will be deprecated after the 2.7 release
+ //[[deprecated("CollectiveCommunication is deprecated. Use Communication instead.")]]
+ = Communication<T>;
+}
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMUNICATOR
+#define DUNE_COMMUNICATOR
+
+#if HAVE_MPI
+
+#include <cassert>
+#include <cstddef>
+#include <iostream>
+#include <map>
+#include <type_traits>
+#include <utility>
+
+#include <mpi.h>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/parallel/interface.hh>
+#include <dune/common/parallel/remoteindices.hh>
+#include <dune/common/stdstreams.hh>
+
+namespace Dune
+{
+ /** @defgroup Common_Parallel Parallel Computing based on Indexsets
+ * @ingroup ParallelCommunication
+ * @brief Provides classes for syncing distributed indexed
+ * data structures.
+ *
+ * In a parallel representation a container \f$x\f$,
+ * e.g. a plain C-array, cannot be stored with all entries on each process
+ * because of limited memory and efficiency reasons. Therefore
+ * it is represented by individual
+ * pieces \f$x_p\f$, \f$p=0, \ldots, P-1\f$, where \f$x_p\f$ is the piece stored on
+ * process \f$p\f$ of the \f$P\f$ processes participating in the calculation.
+ * Although the global representation of the container is not
+ * available on any process, a process \f$p\f$ needs to know how the entries
+ * of its local piece \f$x_p\f$ correspond to the entries of the global
+ * container \f$x\f$, which would be used in a sequential program. In this
+ * module we present classes describing the mapping of the local pieces
+ * to the global
+ * view and the communication interfaces.
+ *
+ * @section IndexSet Parallel Index Sets
+ *
+ * From an abstract point of view a random access container \f$x: I
+ * \rightarrow K\f$ provides a
+ * mapping from an index set \f$I \subset N_0\f$ onto a set of objects
+ * \f$K\f$. Note that we do not require \f$I\f$ to be consecutive. The piece
+ * \f$x_p\f$ of the container \f$x\f$ stored on process \f$p\f$ is a mapping \f$x_p:I_p
+ * \rightarrow K\f$, where \f$I_p \subset I\f$. For efficiency reasons the entries
+ * of \f$x_p\f$ should be stored in consecutive memory.
+ *
+ * This means that for the local computation the data must be addressable
+ * by a consecutive index starting from \f$0\f$. When using adaptive
+ * discretisation methods there might be the need to reorder the indices
+ * after adding and/or deleting some of the discretisation
+ * points. Therefore this index does not have to be persistent. Further
+ * on we will call this index <em>local index</em>.
+ *
+ * For the communication phases of our algorithms these locally stored
+ * entries must also be addressable by a global identifier to be able to
+ * store the received values tagged with the global identifiers at the
+ * correct local index in the consecutive local memory chunk. To ease the
+ * addition and removal of discretisation points this global identifier has
+ * to be persistent. Further on we will call this global identifier
+ * <em>global index</em>.
+ *
+ * Classes to build the mapping are ParallelIndexSet and ParallelLocalIndex.
+ * As these just provide a mapping from the global index to the local index,
+ * the wrapper class GlobalLookupIndexSet facilitates the reverse lookup.
+ *
+ * @section remote Remote Index Information
+ *
+ * To setup communication between the processes every process needs to
+ * know what indices are also known to other processes and what
+ * attributes are attached to them on the remote side. This information is
+ * calculated and encapsulated in class RemoteIndices.
+ *
+ * @section comm Communication
+ *
+ * Based on the information about the distributed index sets, data
+ * independent interfaces between different sets of the index sets
+ * can be setup using the class Interface. For the actual communication
+ * data dependent communicators can be set up using BufferedCommunicator,
+ * DatatypeCommunicator, or VariableSizeCommunicator based on the interface
+ * information. In contrast to the former two,
+ * the latter is independent of the class Interface and can work on a map
+ * from process number to a pair of index lists describing which local indices
+ * are sent to and received from that process, respectively.
+ */
+ /** @addtogroup Common_Parallel
+ *
+ * @{
+ */
+ /**
+ * @file
+ * @brief Provides utility classes for syncing distributed data via
+ * MPI communication.
+ * @author Markus Blatt
+ */
+
+ /**
+ * @brief Flag for marking indexed data structures where data at
+ * each index is of the same size.
+ * @see VariableSize
+ */
+ struct SizeOne
+ {};
+
+ /**
+ * @brief Flag for marking indexed data structures where the data at each index may
+ * be a variable multiple of another type.
+ * @see SizeOne
+ */
+ struct VariableSize
+ {};
+
+
+ /**
+ * @brief Default policy used for communicating an indexed type.
+ *
+ * This default assumes that exactly one value of the container's
+ * value_type is stored at each index and can be accessed via operator[].
+ */
+ template<class V>
+ struct CommPolicy
+ {
+ /**
+ * @brief The type the policy is for.
+ *
+ * It has to provide the method
+ * \code Type::IndexedType operator[](int i);\endcode
+ * for accessing the value at index i and a typedef IndexedType.
+ * It is assumed that only one entry is stored at each index
+ * (as in a scalar vector).
+ */
+ typedef V Type;
+
+ /**
+ * @brief The type we get at each index with operator[].
+ *
+ * The default is the value_type typedef of the container.
+ */
+ typedef typename V::value_type IndexedType;
+
+ /**
+ * @brief Whether the indexed type has variable size or there
+ * is always one value at each index.
+ */
+ typedef SizeOne IndexedTypeFlag;
+
+ /**
+ * @brief Get the address of entry at an index.
+ *
+ * The default implementation uses operator[] to
+ * get the address.
+ * @param v An existing representation of the type that has more elements than index.
+ * @param index The index of the entry.
+ */
+ static const void* getAddress(const V& v, int index);
+
+ /**
+ * @brief Get the number of primitive elements at that index.
+ *
+ * The default always returns 1.
+ */
+ static int getSize(const V&, int index);
+ };
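+
+ /* Illustrative sketch (MyVector is a hypothetical type, not part of this
+ header): a specialization for a container that stores exactly one
+ double value per index could look like
+
+ \code
+ template<>
+ struct CommPolicy<MyVector>
+ {
+ typedef MyVector Type;
+ typedef double IndexedType;
+ typedef SizeOne IndexedTypeFlag;
+ static const void* getAddress(const MyVector& v, int index); // e.g. &v[index]
+ static int getSize(const MyVector&, int index); // always 1
+ };
+ \endcode
+ */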
+
+ template<class K, int n> class FieldVector;
+
+ template<class B, class A> class VariableBlockVector;
+
+ template<class K, class A, int n>
+ struct CommPolicy<VariableBlockVector<FieldVector<K, n>, A> >
+ {
+ typedef VariableBlockVector<FieldVector<K, n>, A> Type;
+
+ typedef typename Type::B IndexedType;
+
+ typedef VariableSize IndexedTypeFlag;
+
+ static const void* getAddress(const Type& v, int i);
+
+ static int getSize(const Type& v, int i);
+ };
+
+ /**
+ * @brief Error thrown if there was a problem with the communication.
+ */
+ class CommunicationError : public IOError
+ {};
+
+ /**
+ * @brief GatherScatter default implementation that just copies data.
+ */
+ template<class T>
+ struct CopyGatherScatter
+ {
+ typedef typename CommPolicy<T>::IndexedType IndexedType;
+
+ static const IndexedType& gather(const T& vec, std::size_t i);
+
+ static void scatter(T& vec, const IndexedType& v, std::size_t i);
+
+ };
+
+ /**
+ * @brief A utility class for communicating distributed data structures via MPI datatypes.
+ *
+ * This communicator creates special MPI datatypes that address the non-contiguous elements
+ * to be sent and received. The idea is to avoid copying to an additional buffer and to let
+ * the MPI implementation decide whether to allocate buffers or use buffers offered by the
+ * interconnection network.
+ *
+ * Unfortunately the implementation of MPI datatypes often seems to be poor. Therefore, for most MPI
+ * implementations, using a BufferedCommunicator will be more efficient.
+ */
+ template<typename T>
+ class DatatypeCommunicator : public InterfaceBuilder
+ {
+ public:
+
+ /**
+ * @brief Type of the index set.
+ */
+ typedef T ParallelIndexSet;
+
+ /**
+ * @brief Type of the underlying remote indices class.
+ */
+ typedef Dune::RemoteIndices<ParallelIndexSet> RemoteIndices;
+
+ /**
+ * @brief The type of the global index.
+ */
+ typedef typename RemoteIndices::GlobalIndex GlobalIndex;
+
+ /**
+ * @brief The type of the attribute.
+ */
+ typedef typename RemoteIndices::Attribute Attribute;
+
+ /**
+ * @brief The type of the local index.
+ */
+ typedef typename RemoteIndices::LocalIndex LocalIndex;
+
+ /**
+ * @brief Creates a new DatatypeCommunicator.
+ */
+ DatatypeCommunicator();
+
+ /**
+ * @brief Destructor.
+ */
+ ~DatatypeCommunicator();
+
+ /**
+ * @brief Builds the interface between the index sets.
+ *
+ * Has to be called before the actual communication by forward or backward
+ * can be called. Nonpublic indices will be ignored!
+ *
+ *
+ * The types T1 and T2 are classes representing a set of
+ * enumeration values of type DatatypeCommunicator::Attribute.
+ * They have to provide
+ * a (static) method
+ * \code
+ * bool contains(Attribute flag) const;
+ * \endcode
+ * for checking whether the set contains a specific flag.
+ * This functionality is for example provided by the classes
+ * EnumItem, EnumRange and Combine.
+ *
+ * @param remoteIndices The indices present on remote processes.
+ * @param sourceFlags The set of attributes which mark indices we send to other
+ * processes.
+ * @param sendData The indexed data structure whose data will be sent.
+ * @param destFlags The set of attributes which mark the indices we might
+ * receive values from.
+ * @param receiveData The indexed data structure for which we receive data.
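+ *
+ * An illustrative call sequence (all names are placeholders):
+ * \code
+ * DatatypeCommunicator<ParallelIndexSet> communicator;
+ * communicator.build(remoteIndices, sourceFlags, sendData, destFlags, receiveData);
+ * communicator.forward(); // copy values from sendData to receiveData
+ * communicator.backward(); // and back again
+ * communicator.free();
+ * \endcode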
+ */
+ template<class T1, class T2, class V>
+ void build(const RemoteIndices& remoteIndices, const T1& sourceFlags, V& sendData, const T2& destFlags, V& receiveData);
+
+ /**
+ * @brief Sends the primitive values from the source to the destination.
+ */
+ void forward();
+
+ /**
+ * @brief Sends the primitive values from the destination to the source.
+ */
+ void backward();
+
+ /**
+ * @brief Deallocates the MPI requests and data types.
+ */
+ void free();
+ private:
+ enum {
+ /**
+ * @brief Tag for the MPI communication.
+ */
+ commTag_ = 234
+ };
+
+ /**
+ * @brief The indices also known at other processes.
+ */
+ const RemoteIndices* remoteIndices_;
+
+ typedef std::map<int,std::pair<MPI_Datatype,MPI_Datatype> >
+ MessageTypeMap;
+
+ /**
+ * @brief The datatypes built according to the communication interface.
+ */
+ MessageTypeMap messageTypes;
+
+ /**
+ * @brief The pointer to the data whose entries we communicate.
+ */
+ void* data_;
+
+ MPI_Request* requests_[2];
+
+ /**
+ * @brief True if the request and data types were created.
+ */
+ bool created_;
+
+ /**
+ * @brief Creates the MPI_Requests for the forward communication.
+ */
+ template<class V, bool FORWARD>
+ void createRequests(V& sendData, V& receiveData);
+
+ /**
+ * @brief Creates the data types needed for the unbuffered receive.
+ */
+ template<class T1, class T2, class V, bool send>
+ void createDataTypes(const T1& source, const T2& destination, V& data);
+
+ /**
+ * @brief Initiates the sending and receiving.
+ */
+ void sendRecv(MPI_Request* req);
+
+ /**
+ * @brief Information used for setting up the MPI Datatypes.
+ */
+ struct IndexedTypeInformation
+ {
+ /**
+ * @brief Allocate space for setting up the MPI datatype.
+ *
+ * @param i The number of values the datatype will have.
+ */
+ void build(int i)
+ {
+ length = new int[i];
+ displ = new MPI_Aint[i];
+ size = i;
+ }
+
+ /**
+ * @brief Free the allocated space.
+ */
+ void free()
+ {
+ delete[] length;
+ delete[] displ;
+ }
+ /** @brief The number of values at each index. */
+ int* length;
+ /** @brief The displacement at each index. */
+ MPI_Aint* displ;
+ /**
+ * @brief The number of elements we send.
+ * In case of variable sizes this will differ from
+ * size.
+ */
+ int elements;
+ /**
+ * @brief The number of indices in the data type.
+ */
+ int size;
+ };
+
+ /**
+ * @brief Functor for the InterfaceBuilder.
+ *
+ * It will record the information needed to build the MPI_Datatypes.
+ */
+ template<class V>
+ struct MPIDatatypeInformation
+ {
+ /**
+ * @brief Constructor.
+ * @param data The data we construct an MPI data type for.
+ */
+ MPIDatatypeInformation(const V& data) : data_(data)
+ {}
+
+ /**
+ * @brief Reserve space for the information about the datatype.
+ * @param proc The rank of the process this information is for.
+ * @param size The number of indices the datatype will contain.
+ */
+ void reserve(int proc, int size)
+ {
+ information_[proc].build(size);
+ }
+ /**
+ * @brief Add a new index to the datatype.
+ * @param proc The rank of the process this index is sent to
+ * or received from.
+ * @param local The index to add.
+ */
+ void add(int proc, int local)
+ {
+ IndexedTypeInformation& info=information_[proc];
+ assert((info.elements)<info.size);
+ MPI_Get_address( const_cast<void*>(CommPolicy<V>::getAddress(data_, local)),
+ info.displ+info.elements);
+ info.length[info.elements]=CommPolicy<V>::getSize(data_, local);
+ info.elements++;
+ }
+
+ /**
+ * @brief The information about the datatypes to send to or
+ * receive from each process.
+ */
+ std::map<int,IndexedTypeInformation> information_;
+ /**
+ * @brief A representative of the indexed data we send.
+ */
+ const V& data_;
+
+ };
+
+ };
+
+ /**
+ * @brief A communicator that uses buffers to gather and scatter
+ * the data to be sent or received.
+ *
+ * Before the data is sent it is copied to a consecutive buffer and
+ * then that buffer is sent.
+ * The data is received in another buffer and then copied to the actual
+ * position.
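+ *
+ * An illustrative usage sketch (assuming an already built Interface
+ * object 'interface' and a std::vector<double> 'data' on each process):
+ * \code
+ * BufferedCommunicator communicator;
+ * communicator.build<std::vector<double> >(interface);
+ * communicator.forward<CopyGatherScatter<std::vector<double> > >(data);
+ * communicator.free();
+ * \endcode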
+ */
+ class BufferedCommunicator
+ {
+
+ public:
+ /**
+ * @brief Constructor.
+ */
+ BufferedCommunicator();
+
+ /**
+ * @brief Build the buffers and information for the communication process.
+ *
+ *
+ * @param interface The interface that defines what indices are to be communicated.
+ */
+ template<class Data, class Interface>
+ typename std::enable_if<std::is_same<SizeOne,typename CommPolicy<Data>::IndexedTypeFlag>::value, void>::type
+ build(const Interface& interface);
+
+ /**
+ * @brief Build the buffers and information for the communication process.
+ *
+ * @param source The source in a forward send. The values will be copied from here to the send buffers.
+ * @param target The target in a forward send. The received values will be copied to here.
+ * @param interface The interface that defines what indices are to be communicated.
+ */
+ template<class Data, class Interface>
+ void build(const Data& source, const Data& target, const Interface& interface);
+
+ /**
+ * @brief Send from source to target.
+ *
+ * The template parameter GatherScatter (e.g. CopyGatherScatter) has to have a static method
+ * \code
+ * // Gather the data at index index of data
+ * static const typename CommPolicy<Data>::IndexedType& gather(Data& data, int index);
+ *
+ * // Scatter the value at an index of data
+ * static void scatter(Data& data, typename CommPolicy<Data>::IndexedType value,
+ * int index);
+ * \endcode
+ * in the case where CommPolicy<Data>::IndexedTypeFlag is SizeOne
+ * and
+ *
+ * \code
+ * static const typename CommPolicy<Data>::IndexedType gather(Data& data, int index, int subindex);
+ *
+ * static void scatter(Data& data, typename CommPolicy<Data>::IndexedType value,
+ * int index, int subindex);
+ * \endcode
+ * in the case where CommPolicy<Data>::IndexedTypeFlag is VariableSize. Here subindex is the
+ * subindex of the block at index.
+ * @warning The source and target data have to have the same layout as the ones given
+ * to the build function in case of variable size values at the indices.
+ * @param source The values will be copied from here to the send buffers.
+ * @param dest The received values will be copied to here.
+ */
+ template<class GatherScatter, class Data>
+ void forward(const Data& source, Data& dest);
+
+ /**
+ * @brief Communicate in the reverse direction, i.e. send from target to source.
+ *
+ * The template parameter GatherScatter (e.g. CopyGatherScatter) has to have a static method
+ * \code
+ * // Gather the data at index index of data
+ * static const typename CommPolicy<Data>::IndexedType& gather(Data& data, int index);
+ *
+ * // Scatter the value at an index of data
+ * static void scatter(Data& data, typename CommPolicy<Data>::IndexedType value,
+ * int index);
+ * \endcode
+ * in the case where CommPolicy<Data>::IndexedTypeFlag is SizeOne
+ * and
+ *
+ * \code
+ * static const typename CommPolicy<Data>::IndexedType gather(Data& data, int index, int subindex);
+ *
+ * static void scatter(Data& data, typename CommPolicy<Data>::IndexedType value,
+ * int index, int subindex);
+ * \endcode
+ * in the case where CommPolicy<Data>::IndexedTypeFlag is VariableSize. Here subindex is the
+ * subindex of the block at index.
+ * @warning The source and target data have to have the same layout as the ones given
+ * to the build function in case of variable size values at the indices.
+ * @param dest The values will be copied from here to the send buffers.
+ * @param source The received values will be copied to here.
+ */
+ template<class GatherScatter, class Data>
+ void backward(Data& source, const Data& dest);
+
+ /**
+ * @brief Forward send where target and source are the same.
+ *
+ * The template parameter GatherScatter has to have a static method
+ * \code
+ * // Gather the data at index index of data
+ * static const typename CommPolicy<Data>::IndexedType& gather(Data& data, int index);
+ *
+ * // Scatter the value at an index of data
+ * static void scatter(Data& data, typename CommPolicy<Data>::IndexedType value,
+ * int index);
+ * \endcode
+ * in the case where CommPolicy<Data>::IndexedTypeFlag is SizeOne
+ * and
+ *
+ * \code
+ * static const typename CommPolicy<Data>::IndexedType gather(Data& data, int index, int subindex);
+ *
+ * static void scatter(Data& data, typename CommPolicy<Data>::IndexedType value,
+ * int index, int subindex);
+ * \endcode
+ * in the case where CommPolicy<Data>::IndexedTypeFlag is VariableSize. Here subindex is the
+ * subindex of the block at index.
+ * @param data Source and target of the communication.
+ */
+ template<class GatherScatter, class Data>
+ void forward(Data& data);
+
+ /**
+ * @brief Backward send where target and source are the same.
+ *
+ * The template parameter GatherScatter has to have a static method
+ * \code
+ * // Gather the data at index index of data
+ * static const typename CommPolicy<Data>::IndexedType& gather(Data& data, int index);
+ *
+ * // Scatter the value at an index of data
+ * static void scatter(Data& data, typename CommPolicy<Data>::IndexedType value,
+ * int index);
+ * \endcode
+ * in the case where CommPolicy<Data>::IndexedTypeFlag is SizeOne
+ * and
+ *
+ * \code
+ * static const typename CommPolicy<Data>::IndexedType gather(Data& data, int index, int subindex);
+ *
+ * static void scatter(Data& data, typename CommPolicy<Data>::IndexedType value,
+ * int index, int subindex);
+ * \endcode
+ * in the case where CommPolicy<Data>::IndexedTypeFlag is VariableSize. Here subindex is the
+ * subindex of the block at index.
+ * @param data Source and target of the communication.
+ */
+ template<class GatherScatter, class Data>
+ void backward(Data& data);
+
+ /**
+ * @brief Free the allocated memory (i.e. buffers and message information).
+ */
+ void free();
+
+ /**
+ * @brief Destructor.
+ */
+ ~BufferedCommunicator();
+
+ private:
+
+ /**
+ * @brief The type of the map that maps interface information to processors.
+ */
+ typedef std::map<int,std::pair<InterfaceInformation,InterfaceInformation> >
+ InterfaceMap;
+
+
+ /**
+ * @brief Functors for message size calculation
+ */
+ template<class Data, typename IndexedTypeFlag>
+ struct MessageSizeCalculator
+ {};
+
+ /**
+ * @brief Functor for message size calculation for datatypes
+ * where at each index is only one value.
+ */
+ template<class Data>
+ struct MessageSizeCalculator<Data,SizeOne>
+ {
+ /**
+ * @brief Calculate the number of values in message
+ * @param info The information about the interface corresponding
+ * to the message.
+ * @return The number of values in the message.
+ */
+ inline int operator()(const InterfaceInformation& info) const;
+ /**
+ * @brief Calculate the number of values in message
+ *
+ * @param info The information about the interface corresponding
+ * to the message.
+ * @param data ignored.
+ * @return The number of values in the message.
+ */
+ inline int operator()(const Data& data, const InterfaceInformation& info) const;
+ };
+
+ /**
+ * @brief Functor for message size calculation for datatypes
+ * where at each index can be a variable number of values.
+ */
+ template<class Data>
+ struct MessageSizeCalculator<Data,VariableSize>
+ {
+ /**
+ * @brief Calculate the number of values in message
+ *
+ * @param info The information about the interface corresponding
+ * to the message.
+ * @param data A representative of the data we send.
+ * @return The number of values in the message.
+ */
+ inline int operator()(const Data& data, const InterfaceInformation& info) const;
+ };
+
+ /**
+ * @brief Functors for message data gathering.
+ */
+ template<class Data, class GatherScatter, bool send, typename IndexedTypeFlag>
+ struct MessageGatherer
+ {};
+
+ /**
+ * @brief Functor for message data gathering for datatypes
+ * where at each index is only one value.
+ */
+ template<class Data, class GatherScatter, bool send>
+ struct MessageGatherer<Data,GatherScatter,send,SizeOne>
+ {
+ /** @brief The type of the values we send. */
+ typedef typename CommPolicy<Data>::IndexedType Type;
+
+ /**
+ * @brief The type of the functor that does the actual copying
+ * during the data gathering.
+ */
+ typedef GatherScatter Gatherer;
+
+ enum {
+ /**
+ * @brief The communication mode
+ *
+ * True if this was a forward communication.
+ */
+ forward=send
+ };
+
+ /**
+ * @brief Copies the values to send into the buffer.
+ * @param interface The interface used in the send.
+ * @param data The data from which we copy the values.
+ * @param buffer The send buffer to copy to.
+ * @param bufferSize The size of the buffer in bytes. For checks.
+ */
+ inline void operator()(const InterfaceMap& interface, const Data& data, Type* buffer, size_t bufferSize) const;
+ };
+
+ /**
+ * @brief Functor for message data gathering for datatypes
+ * where at each index there can be a variable number of values.
+ */
+ template<class Data, class GatherScatter, bool send>
+ struct MessageGatherer<Data,GatherScatter,send,VariableSize>
+ {
+ /** @brief The type of the values we send. */
+ typedef typename CommPolicy<Data>::IndexedType Type;
+
+ /**
+ * @brief The type of the functor that does the actual copying
+ * during the data gathering.
+ */
+ typedef GatherScatter Gatherer;
+
+ enum {
+ /**
+ * @brief The communication mode
+ *
+ * True if this was a forward communication.
+ */
+ forward=send
+ };
+
+ /**
+ * @brief Copies the values to send into the buffer.
+ * @param interface The interface used in the send.
+ * @param data The data from which we copy the values.
+ * @param buffer The send buffer to copy to.
+ * @param bufferSize The size of the buffer in bytes. For checks.
+ */
+ inline void operator()(const InterfaceMap& interface, const Data& data, Type* buffer, size_t bufferSize) const;
+ };
+
+ /**
+ * @brief Functors for message data scattering.
+ */
+ template<class Data, class GatherScatter, bool send, typename IndexedTypeFlag>
+ struct MessageScatterer
+ {};
+
+ /**
+ * @brief Functor for message data scattering for datatypes
+ * where at each index is only one value.
+ */
+ template<class Data, class GatherScatter, bool send>
+ struct MessageScatterer<Data,GatherScatter,send,SizeOne>
+ {
+ /** @brief The type of the values we send. */
+ typedef typename CommPolicy<Data>::IndexedType Type;
+
+ /**
+ * @brief The type of the functor that does the actual copying
+ * during the data Scattering.
+ */
+ typedef GatherScatter Scatterer;
+
+ enum {
+ /**
+ * @brief The communication mode
+ *
+ * True if this was a forward communication.
+ */
+ forward=send
+ };
+
+ /**
+ * @brief Copy the message data from the receive buffer to the data.
+ * @param interface The interface used in the send.
+ * @param data The data to which we copy the values.
+ * @param buffer The receive buffer to copy from.
+ * @param proc The rank of the process the message is from.
+ */
+ inline void operator()(const InterfaceMap& interface, Data& data, Type* buffer, const int& proc) const;
+ };
+ /**
+ * @brief Functor for message data scattering for datatypes
+ * where at each index there can be a variable number of values.
+ */
+ template<class Data, class GatherScatter, bool send>
+ struct MessageScatterer<Data,GatherScatter,send,VariableSize>
+ {
+ /** @brief The type of the values we send. */
+ typedef typename CommPolicy<Data>::IndexedType Type;
+
+ /**
+ * @brief The type of the functor that does the actual copying
+ * during the data Scattering.
+ */
+ typedef GatherScatter Scatterer;
+
+ enum {
+ /**
+ * @brief The communication mode
+ *
+ * True if this was a forward communication.
+ */
+ forward=send
+ };
+
+ /**
+ * @brief Copy the message data from the receive buffer to the data.
+ * @param interface The interface used in the send.
+ * @param data The data to which we copy the values.
+ * @param buffer The receive buffer to copy from.
+ * @param proc The rank of the process the message is from.
+ */
+ inline void operator()(const InterfaceMap& interface, Data& data, Type* buffer, const int& proc) const;
+ };
+
+ /**
+ * @brief Information about a message to send.
+ */
+ struct MessageInformation
+ {
+ /** @brief Constructor. */
+ MessageInformation()
+ : start_(0), size_(0)
+ {}
+
+ /**
+ * @brief Constructor.
+ * @param start The start of the message in the global buffer.
+ * Not in bytes but in number of values from the beginning of
+ * the buffer
+ * @param size The size of the message in bytes.
+ */
+ MessageInformation(size_t start, size_t size)
+ : start_(start), size_(size)
+ {}
+ /**
+ * @brief Start of the message in the buffer counted in number of values.
+ */
+ size_t start_;
+ /**
+ * @brief Number of bytes in the message.
+ */
+ size_t size_;
+ };
+
+ /**
+ * @brief Type of the map of information about the messages to send.
+ *
+ * The key is the process number to communicate with and the value is
+ * the pair of information about sending and receiving messages.
+ */
+ typedef std::map<int,std::pair<MessageInformation,MessageInformation> >
+ InformationMap;
+ /**
+ * @brief Gathered information about the messages to send.
+ */
+ InformationMap messageInformation_;
+ /**
+ * @brief Communication buffers.
+ */
+ char* buffers_[2];
+ /**
+ * @brief The size of the communication buffers
+ */
+ size_t bufferSize_[2];
+
+ enum {
+ /**
+ * @brief The tag we use for communication.
+ */
+ commTag_
+ };
+
+ /**
+ * @brief The interface we currently work with.
+ */
+ std::map<int,std::pair<InterfaceInformation,InterfaceInformation> > interfaces_;
+
+ MPI_Comm communicator_;
+
+ /**
+ * @brief Send and receive Data.
+ */
+ template<class GatherScatter, bool FORWARD, class Data>
+ void sendRecv(const Data& source, Data& target);
+
+ };
+
+#ifndef DOXYGEN
+
+ template<class V>
+ inline const void* CommPolicy<V>::getAddress(const V& v, int index)
+ {
+ return &(v[index]);
+ }
+
+ template<class V>
+ inline int CommPolicy<V>::getSize([[maybe_unused]] const V& v, [[maybe_unused]] int index)
+ {
+ return 1;
+ }
+
+ template<class K, class A, int n>
+ inline const void* CommPolicy<VariableBlockVector<FieldVector<K, n>, A> >::getAddress(const Type& v, int index)
+ {
+ return &(v[index][0]);
+ }
+
+ template<class K, class A, int n>
+ inline int CommPolicy<VariableBlockVector<FieldVector<K, n>, A> >::getSize(const Type& v, int index)
+ {
+ return v[index].getsize();
+ }
+
+ template<class T>
+ inline const typename CopyGatherScatter<T>::IndexedType& CopyGatherScatter<T>::gather(const T & vec, std::size_t i)
+ {
+ return vec[i];
+ }
+
+ template<class T>
+ inline void CopyGatherScatter<T>::scatter(T& vec, const IndexedType& v, std::size_t i)
+ {
+ vec[i]=v;
+ }
+
+ template<typename T>
+ DatatypeCommunicator<T>::DatatypeCommunicator()
+ : remoteIndices_(0), created_(false)
+ {
+ requests_[0]=0;
+ requests_[1]=0;
+ }
+
+
+
+ template<typename T>
+ DatatypeCommunicator<T>::~DatatypeCommunicator()
+ {
+ free();
+ }
+
+ template<typename T>
+ template<class T1, class T2, class V>
+ inline void DatatypeCommunicator<T>::build(const RemoteIndices& remoteIndices,
+ const T1& source, V& sendData,
+ const T2& destination, V& receiveData)
+ {
+ remoteIndices_ = &remoteIndices;
+ free();
+ createDataTypes<T1,T2,V,false>(source,destination, receiveData);
+ createDataTypes<T1,T2,V,true>(source,destination, sendData);
+ createRequests<V,true>(sendData, receiveData);
+ createRequests<V,false>(receiveData, sendData);
+ created_=true;
+ }
+
+ template<typename T>
+ void DatatypeCommunicator<T>::free()
+ {
+ if(created_) {
+ delete[] requests_[0];
+ delete[] requests_[1];
+ typedef MessageTypeMap::iterator iterator;
+ typedef MessageTypeMap::const_iterator const_iterator;
+
+ const const_iterator end=messageTypes.end();
+
+ for(iterator process = messageTypes.begin(); process != end; ++process) {
+ MPI_Datatype *type = &(process->second.first);
+ int finalized=0;
+ MPI_Finalized(&finalized);
+ if(*type!=MPI_DATATYPE_NULL && !finalized)
+ MPI_Type_free(type);
+ type = &(process->second.second);
+ if(*type!=MPI_DATATYPE_NULL && !finalized)
+ MPI_Type_free(type);
+ }
+ messageTypes.clear();
+ created_=false;
+ }
+
+ }
+
+ template<typename T>
+ template<class T1, class T2, class V, bool send>
+ void DatatypeCommunicator<T>::createDataTypes(const T1& sourceFlags, const T2& destFlags, V& data)
+ {
+
+ MPIDatatypeInformation<V> dataInfo(data);
+ this->template buildInterface<RemoteIndices,T1,T2,MPIDatatypeInformation<V>,send>(*remoteIndices_,sourceFlags, destFlags, dataInfo);
+
+ typedef typename RemoteIndices::RemoteIndexMap::const_iterator const_iterator;
+ const const_iterator end=this->remoteIndices_->end();
+
+ // Allocate MPI_Datatypes and deallocate memory for the type construction.
+ for(const_iterator process=this->remoteIndices_->begin(); process != end; ++process) {
+ IndexedTypeInformation& info=dataInfo.information_[process->first];
+ // Shift the displacement
+ MPI_Aint base;
+ MPI_Get_address(const_cast<void *>(CommPolicy<V>::getAddress(data, 0)), &base);
+
+ for(int i=0; i< info.elements; i++) {
+ info.displ[i]-=base;
+ }
+
+ // Create data type
+ MPI_Datatype* type = &( send ? messageTypes[process->first].first : messageTypes[process->first].second);
+ MPI_Type_create_hindexed(info.elements, info.length, info.displ,
+ MPITraits<typename CommPolicy<V>::IndexedType>::getType(), type);
+ MPI_Type_commit(type);
+ // Deallocate memory
+ info.free();
+ }
+ }
+
+ template<typename T>
+ template<class V, bool createForward>
+ void DatatypeCommunicator<T>::createRequests(V& sendData, V& receiveData)
+ {
+ typedef std::map<int,std::pair<MPI_Datatype,MPI_Datatype> >::const_iterator MapIterator;
+ int rank;
+ static int index = createForward ? 1 : 0;
+ int noMessages = messageTypes.size();
+ // allocate request handles
+ requests_[index] = new MPI_Request[2*noMessages];
+ const MapIterator end = messageTypes.end();
+ int request=0;
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ // Set up the requests for receiving first
+ for(MapIterator process = messageTypes.begin(); process != end;
+ ++process, ++request) {
+ MPI_Datatype type = createForward ? process->second.second : process->second.first;
+ void* address = const_cast<void*>(CommPolicy<V>::getAddress(receiveData,0));
+ MPI_Recv_init(address, 1, type, process->first, commTag_, this->remoteIndices_->communicator(), requests_[index]+request);
+ }
+
+ // And now the send requests
+
+ for(MapIterator process = messageTypes.begin(); process != end;
+ ++process, ++request) {
+ MPI_Datatype type = createForward ? process->second.first : process->second.second;
+ void* address = const_cast<void*>(CommPolicy<V>::getAddress(sendData, 0));
+ MPI_Ssend_init(address, 1, type, process->first, commTag_, this->remoteIndices_->communicator(), requests_[index]+request);
+ }
+ }
+
+ template<typename T>
+ void DatatypeCommunicator<T>::forward()
+ {
+ sendRecv(requests_[1]);
+ }
+
+ template<typename T>
+ void DatatypeCommunicator<T>::backward()
+ {
+ sendRecv(requests_[0]);
+ }
+
+ template<typename T>
+ void DatatypeCommunicator<T>::sendRecv(MPI_Request* requests)
+ {
+ int noMessages = messageTypes.size();
+ // Start the receive calls first
+ MPI_Startall(noMessages, requests);
+ // Now the send calls
+ MPI_Startall(noMessages, requests+noMessages);
+
+ // Wait for completion of the communication send first then receive
+ MPI_Status* status=new MPI_Status[2*noMessages];
+ for(int i=0; i<2*noMessages; i++)
+ status[i].MPI_ERROR=MPI_SUCCESS;
+
+ int send = MPI_Waitall(noMessages, requests+noMessages, status+noMessages);
+ int receive = MPI_Waitall(noMessages, requests, status);
+
+ // Error checks
+ int success=1, globalSuccess=0;
+ if(send==MPI_ERR_IN_STATUS) {
+ int rank;
+ MPI_Comm_rank(this->remoteIndices_->communicator(), &rank);
+ std::cerr<<rank<<": Error in sending :"<<std::endl;
+ // Search for the error
+ for(int i=noMessages; i< 2*noMessages; i++)
+ if(status[i].MPI_ERROR!=MPI_SUCCESS) {
+ char message[300];
+ int messageLength;
+ MPI_Error_string(status[i].MPI_ERROR, message, &messageLength);
+ std::cerr<<" source="<<status[i].MPI_SOURCE<<" message: ";
+ for(int j = 0; j < messageLength; j++)
+ std::cerr << message[j];
+ }
+ std::cerr<<std::endl;
+ success=0;
+ }
+
+ if(receive==MPI_ERR_IN_STATUS) {
+ int rank;
+ MPI_Comm_rank(this->remoteIndices_->communicator(), &rank);
+ std::cerr<<rank<<": Error in receiving!"<<std::endl;
+ // Search for the error
+ for(int i=0; i< noMessages; i++)
+ if(status[i].MPI_ERROR!=MPI_SUCCESS) {
+ char message[300];
+ int messageLength;
+ MPI_Error_string(status[i].MPI_ERROR, message, &messageLength);
+ std::cerr<<" source="<<status[i].MPI_SOURCE<<" message: ";
+ for(int j = 0; j < messageLength; j++)
+ std::cerr << message[j];
+ }
+ std::cerr<<std::endl;
+ success=0;
+ }
+
+ MPI_Allreduce(&success, &globalSuccess, 1, MPI_INT, MPI_MIN, this->remoteIndices_->communicator());
+
+ delete[] status;
+
+ if(!globalSuccess)
+ DUNE_THROW(CommunicationError, "A communication error occurred!");
+
+ }
+
+ inline BufferedCommunicator::BufferedCommunicator()
+ {
+ buffers_[0]=0;
+ buffers_[1]=0;
+ bufferSize_[0]=0;
+ bufferSize_[1]=0;
+ }
+
+ template<class Data, class Interface>
+ typename std::enable_if<std::is_same<SizeOne, typename CommPolicy<Data>::IndexedTypeFlag>::value, void>::type
+ BufferedCommunicator::build(const Interface& interface)
+ {
+ interfaces_=interface.interfaces();
+ communicator_=interface.communicator();
+ typedef typename std::map<int,std::pair<InterfaceInformation,InterfaceInformation> >
+ ::const_iterator const_iterator;
+ typedef typename CommPolicy<Data>::IndexedTypeFlag Flag;
+ const const_iterator end = interfaces_.end();
+ int lrank;
+ MPI_Comm_rank(communicator_, &lrank);
+
+ bufferSize_[0]=0;
+ bufferSize_[1]=0;
+
+ for(const_iterator interfacePair = interfaces_.begin();
+ interfacePair != end; ++interfacePair) {
+ int noSend = MessageSizeCalculator<Data,Flag>() (interfacePair->second.first);
+ int noRecv = MessageSizeCalculator<Data,Flag>() (interfacePair->second.second);
+ if (noSend + noRecv > 0)
+ messageInformation_.insert(std::make_pair(interfacePair->first,
+ std::make_pair(MessageInformation(bufferSize_[0],
+ noSend*sizeof(typename CommPolicy<Data>::IndexedType)),
+ MessageInformation(bufferSize_[1],
+ noRecv*sizeof(typename CommPolicy<Data>::IndexedType)))));
+ bufferSize_[0] += noSend;
+ bufferSize_[1] += noRecv;
+ }
+
+ // allocate the buffers
+ bufferSize_[0] *= sizeof(typename CommPolicy<Data>::IndexedType);
+ bufferSize_[1] *= sizeof(typename CommPolicy<Data>::IndexedType);
+
+ buffers_[0] = new char[bufferSize_[0]];
+ buffers_[1] = new char[bufferSize_[1]];
+ }
+
+ template<class Data, class Interface>
+ void BufferedCommunicator::build(const Data& source, const Data& dest, const Interface& interface)
+ {
+
+ interfaces_=interface.interfaces();
+ communicator_=interface.communicator();
+ typedef typename std::map<int,std::pair<InterfaceInformation,InterfaceInformation> >
+ ::const_iterator const_iterator;
+ typedef typename CommPolicy<Data>::IndexedTypeFlag Flag;
+ const const_iterator end = interfaces_.end();
+
+ bufferSize_[0]=0;
+ bufferSize_[1]=0;
+
+ for(const_iterator interfacePair = interfaces_.begin();
+ interfacePair != end; ++interfacePair) {
+ int noSend = MessageSizeCalculator<Data,Flag>() (source, interfacePair->second.first);
+ int noRecv = MessageSizeCalculator<Data,Flag>() (dest, interfacePair->second.second);
+ if (noSend + noRecv > 0)
+ messageInformation_.insert(std::make_pair(interfacePair->first,
+ std::make_pair(MessageInformation(bufferSize_[0],
+ noSend*sizeof(typename CommPolicy<Data>::IndexedType)),
+ MessageInformation(bufferSize_[1],
+ noRecv*sizeof(typename CommPolicy<Data>::IndexedType)))));
+ bufferSize_[0] += noSend;
+ bufferSize_[1] += noRecv;
+ }
+
+ bufferSize_[0] *= sizeof(typename CommPolicy<Data>::IndexedType);
+ bufferSize_[1] *= sizeof(typename CommPolicy<Data>::IndexedType);
+ // allocate the buffers
+ buffers_[0] = new char[bufferSize_[0]];
+ buffers_[1] = new char[bufferSize_[1]];
+ }
+
+ inline void BufferedCommunicator::free()
+ {
+ messageInformation_.clear();
+ if(buffers_[0])
+ delete[] buffers_[0];
+
+ if(buffers_[1])
+ delete[] buffers_[1];
+ buffers_[0]=buffers_[1]=0;
+ }
+
+ inline BufferedCommunicator::~BufferedCommunicator()
+ {
+ free();
+ }
+
+ template<class Data>
+ inline int BufferedCommunicator::MessageSizeCalculator<Data,SizeOne>::operator()
+ (const InterfaceInformation& info) const
+ {
+ return info.size();
+ }
+
+
+ template<class Data>
+ inline int BufferedCommunicator::MessageSizeCalculator<Data,SizeOne>::operator()
+ (const Data&, const InterfaceInformation& info) const
+ {
+ return operator()(info);
+ }
+
+
+ template<class Data>
+ inline int BufferedCommunicator::MessageSizeCalculator<Data, VariableSize>::operator()
+ (const Data& data, const InterfaceInformation& info) const
+ {
+ int entries=0;
+
+ for(size_t i=0; i < info.size(); i++)
+ entries += CommPolicy<Data>::getSize(data,info[i]);
+
+ return entries;
+ }
+
+
+ template<class Data, class GatherScatter, bool FORWARD>
+ inline void BufferedCommunicator::MessageGatherer<Data,GatherScatter,FORWARD,VariableSize>::operator()(const InterfaceMap& interfaces,const Data& data, Type* buffer, [[maybe_unused]] size_t bufferSize) const
+ {
+ typedef typename InterfaceMap::const_iterator
+ const_iterator;
+
+ int rank;
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ const const_iterator end = interfaces.end();
+ size_t index=0;
+
+ for(const_iterator interfacePair = interfaces.begin();
+ interfacePair != end; ++interfacePair) {
+ int size = FORWARD ? interfacePair->second.first.size() :
+ interfacePair->second.second.size();
+
+ for(int i=0; i < size; i++) {
+ int local = FORWARD ? interfacePair->second.first[i] :
+ interfacePair->second.second[i];
+ for(std::size_t j=0; j < CommPolicy<Data>::getSize(data, local); j++, index++) {
+
+#ifdef DUNE_ISTL_WITH_CHECKING
+ assert(bufferSize>=(index+1)*sizeof(typename CommPolicy<Data>::IndexedType));
+#endif
+ buffer[index]=GatherScatter::gather(data, local, j);
+ }
+
+ }
+ }
+
+ }
+
+
+ template<class Data, class GatherScatter, bool FORWARD>
+ inline void BufferedCommunicator::MessageGatherer<Data,GatherScatter,FORWARD,SizeOne>::operator()(
+ const InterfaceMap& interfaces, const Data& data, Type* buffer, [[maybe_unused]] size_t bufferSize) const
+ {
+ typedef typename InterfaceMap::const_iterator
+ const_iterator;
+ const const_iterator end = interfaces.end();
+ size_t index = 0;
+
+ int rank;
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ for(const_iterator interfacePair = interfaces.begin();
+ interfacePair != end; ++interfacePair) {
+ size_t size = FORWARD ? interfacePair->second.first.size() :
+ interfacePair->second.second.size();
+
+ for(size_t i=0; i < size; i++) {
+
+#ifdef DUNE_ISTL_WITH_CHECKING
+ assert(bufferSize>=(index+1)*sizeof(typename CommPolicy<Data>::IndexedType));
+#endif
+
+ buffer[index++] = GatherScatter::gather(data, FORWARD ? interfacePair->second.first[i] :
+ interfacePair->second.second[i]);
+ }
+ }
+
+ }
+
+
+ template<class Data, class GatherScatter, bool FORWARD>
+ inline void BufferedCommunicator::MessageScatterer<Data,GatherScatter,FORWARD,VariableSize>::operator()(const InterfaceMap& interfaces, Data& data, Type* buffer, const int& proc) const
+ {
+ typedef typename InterfaceMap::value_type::second_type::first_type Information;
+ const typename InterfaceMap::const_iterator infoPair = interfaces.find(proc);
+
+ assert(infoPair!=interfaces.end());
+
+ const Information& info = FORWARD ? infoPair->second.second :
+ infoPair->second.first;
+
+ for(size_t i=0, index=0; i < info.size(); i++) {
+ for(size_t j=0; j < CommPolicy<Data>::getSize(data, info[i]); j++)
+ GatherScatter::scatter(data, buffer[index++], info[i], j);
+ }
+ }
+
+
+ template<class Data, class GatherScatter, bool FORWARD>
+ inline void BufferedCommunicator::MessageScatterer<Data,GatherScatter,FORWARD,SizeOne>::operator()(const InterfaceMap& interfaces, Data& data, Type* buffer, const int& proc) const
+ {
+ typedef typename InterfaceMap::value_type::second_type::first_type Information;
+ const typename InterfaceMap::const_iterator infoPair = interfaces.find(proc);
+
+ assert(infoPair!=interfaces.end());
+
+ const Information& info = FORWARD ? infoPair->second.second :
+ infoPair->second.first;
+
+ for(size_t i=0; i < info.size(); i++) {
+ GatherScatter::scatter(data, buffer[i], info[i]);
+ }
+ }
+
+
+ template<class GatherScatter,class Data>
+ void BufferedCommunicator::forward(Data& data)
+ {
+ this->template sendRecv<GatherScatter,true>(data, data);
+ }
+
+
+ template<class GatherScatter, class Data>
+ void BufferedCommunicator::backward(Data& data)
+ {
+ this->template sendRecv<GatherScatter,false>(data, data);
+ }
+
+
+ template<class GatherScatter, class Data>
+ void BufferedCommunicator::forward(const Data& source, Data& dest)
+ {
+ this->template sendRecv<GatherScatter,true>(source, dest);
+ }
+
+
+ template<class GatherScatter, class Data>
+ void BufferedCommunicator::backward(Data& source, const Data& dest)
+ {
+ this->template sendRecv<GatherScatter,false>(dest, source);
+ }
+
+
+ template<class GatherScatter, bool FORWARD, class Data>
+ void BufferedCommunicator::sendRecv(const Data& source, Data& dest)
+ {
+ int rank, lrank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+ MPI_Comm_rank(MPI_COMM_WORLD,&lrank);
+
+ typedef typename CommPolicy<Data>::IndexedType Type;
+ Type *sendBuffer, *recvBuffer;
+ size_t sendBufferSize;
+#ifndef NDEBUG
+ size_t recvBufferSize;
+#endif
+
+ if(FORWARD) {
+ sendBuffer = reinterpret_cast<Type*>(buffers_[0]);
+ sendBufferSize = bufferSize_[0];
+ recvBuffer = reinterpret_cast<Type*>(buffers_[1]);
+#ifndef NDEBUG
+ recvBufferSize = bufferSize_[1];
+#endif
+ }else{
+ sendBuffer = reinterpret_cast<Type*>(buffers_[1]);
+ sendBufferSize = bufferSize_[1];
+ recvBuffer = reinterpret_cast<Type*>(buffers_[0]);
+#ifndef NDEBUG
+ recvBufferSize = bufferSize_[0];
+#endif
+ }
+ typedef typename CommPolicy<Data>::IndexedTypeFlag Flag;
+
+ MessageGatherer<Data,GatherScatter,FORWARD,Flag>() (interfaces_, source, sendBuffer, sendBufferSize);
+
+ MPI_Request* sendRequests = new MPI_Request[messageInformation_.size()];
+ MPI_Request* recvRequests = new MPI_Request[messageInformation_.size()];
+ /* Number of recvRequests that are not MPI_REQUEST_NULL */
+ size_t numberOfRealRecvRequests = 0;
+
+ // Setup receive first
+ typedef typename InformationMap::const_iterator const_iterator;
+
+ const const_iterator end = messageInformation_.end();
+ size_t i=0;
+ int* processMap = new int[messageInformation_.size()];
+
+ for(const_iterator info = messageInformation_.begin(); info != end; ++info, ++i) {
+ processMap[i]=info->first;
+ if(FORWARD) {
+ assert(info->second.second.start_*sizeof(typename CommPolicy<Data>::IndexedType)+info->second.second.size_ <= recvBufferSize );
+ Dune::dvverb<<rank<<": receiving "<<info->second.second.size_<<" from "<<info->first<<std::endl;
+ if(info->second.second.size_) {
+ MPI_Irecv(recvBuffer+info->second.second.start_, info->second.second.size_,
+ MPI_BYTE, info->first, commTag_, communicator_,
+ recvRequests+i);
+ numberOfRealRecvRequests += 1;
+ } else {
+ // Nothing to receive -> set request to inactive
+ recvRequests[i]=MPI_REQUEST_NULL;
+ }
+ }else{
+ assert(info->second.first.start_*sizeof(typename CommPolicy<Data>::IndexedType)+info->second.first.size_ <= recvBufferSize );
+ Dune::dvverb<<rank<<": receiving "<<info->second.first.size_<<" to "<<info->first<<std::endl;
+ if(info->second.first.size_) {
+ MPI_Irecv(recvBuffer+info->second.first.start_, info->second.first.size_,
+ MPI_BYTE, info->first, commTag_, communicator_,
+ recvRequests+i);
+ numberOfRealRecvRequests += 1;
+ } else {
+ // Nothing to receive -> set request to inactive
+ recvRequests[i]=MPI_REQUEST_NULL;
+ }
+ }
+ }
+
+ // now the send requests
+ i=0;
+ for(const_iterator info = messageInformation_.begin(); info != end; ++info, ++i)
+ if(FORWARD) {
+ assert(info->second.second.start_*sizeof(typename CommPolicy<Data>::IndexedType)+info->second.second.size_ <= recvBufferSize );
+ Dune::dvverb<<rank<<": sending "<<info->second.first.size_<<" to "<<info->first<<std::endl;
+ assert(info->second.first.start_*sizeof(typename CommPolicy<Data>::IndexedType)+info->second.first.size_ <= sendBufferSize );
+ if(info->second.first.size_)
+ MPI_Issend(sendBuffer+info->second.first.start_, info->second.first.size_,
+ MPI_BYTE, info->first, commTag_, communicator_,
+ sendRequests+i);
+ else
+ // Nothing to send -> set request to inactive
+ sendRequests[i]=MPI_REQUEST_NULL;
+ }else{
+ assert(info->second.second.start_*sizeof(typename CommPolicy<Data>::IndexedType)+info->second.second.size_ <= sendBufferSize );
+ Dune::dvverb<<rank<<": sending "<<info->second.second.size_<<" to "<<info->first<<std::endl;
+ if(info->second.second.size_)
+ MPI_Issend(sendBuffer+info->second.second.start_, info->second.second.size_,
+ MPI_BYTE, info->first, commTag_, communicator_,
+ sendRequests+i);
+ else
+ // Nothing to send -> set request to inactive
+ sendRequests[i]=MPI_REQUEST_NULL;
+ }
+
+ // Wait for completion of receive and immediately start scatter
+ i=0;
+ //int success = 1;
+ int finished = MPI_UNDEFINED;
+ MPI_Status status; //[messageInformation_.size()];
+ //MPI_Waitall(messageInformation_.size(), recvRequests, status);
+
+ for(i=0; i< numberOfRealRecvRequests; i++) {
+ status.MPI_ERROR=MPI_SUCCESS;
+ MPI_Waitany(messageInformation_.size(), recvRequests, &finished, &status);
+ assert(finished != MPI_UNDEFINED);
+
+ if(status.MPI_ERROR==MPI_SUCCESS) {
+ int& proc = processMap[finished];
+ typename InformationMap::const_iterator infoIter = messageInformation_.find(proc);
+ assert(infoIter != messageInformation_.end());
+
+ MessageInformation info = (FORWARD) ? infoIter->second.second : infoIter->second.first;
+ assert(info.start_+info.size_ <= recvBufferSize);
+
+ MessageScatterer<Data,GatherScatter,FORWARD,Flag>() (interfaces_, dest, recvBuffer+info.start_, proc);
+ }else{
+ std::cerr<<rank<<": MPI_Error occurred while receiving message from "<<processMap[finished]<<std::endl;
+ //success=0;
+ }
+ }
+
+ MPI_Status recvStatus;
+
+ // Wait for completion of sends
+ for(i=0; i< messageInformation_.size(); i++)
+ if(MPI_SUCCESS!=MPI_Wait(sendRequests+i, &recvStatus)) {
+ std::cerr<<rank<<": MPI_Error occurred while sending message to "<<processMap[finished]<<std::endl;
+ //success=0;
+ }
+ /*
+ int globalSuccess;
+ MPI_Allreduce(&success, &globalSuccess, 1, MPI_INT, MPI_MIN, interface_->communicator());
+
+ if(!globalSuccess)
+ DUNE_THROW(CommunicationError, "A communication error occurred!");
+ */
+ delete[] processMap;
+ delete[] sendRequests;
+ delete[] recvRequests;
+
+ }
+
+#endif // DOXYGEN
+
+ /** @} */
+}
+
+#endif // HAVE_MPI
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_PARALLEL_FUTURE_HH
+#define DUNE_COMMON_PARALLEL_FUTURE_HH
+
+#include <memory>
+#include <dune/common/exceptions.hh>
+
+namespace Dune{
+
+ /*! \brief This exception is thrown when `ready()`, `wait()` or `get()` is
+ called on an invalid future. A future is valid until `get()` is called,
+ provided it was not default-constructed and was not moved from.
+ */
+ class InvalidFutureException : public InvalidStateException
+ {};
+
+ // forward declaration
+ template<class T>
+ class PseudoFuture;
+
+ /*! \brief Type-erasure for future-like objects. A future-like object is an
+ object satisfying the interface of FutureBase.
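+
+ A minimal usage sketch (relying only on the PseudoFuture class defined further
+ down in this header; any type with the same interface would work):
+ \code
+ PseudoFuture<int> pf(42);            // ready immediately
+ Future<int> future(std::move(pf));   // type-erased wrapper
+ if(future.valid() && future.ready()) {
+   int v = future.get();              // yields 42 and invalidates the future
+ }
+ \endcode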
+ */
+ template<class T>
+ class Future{
+ // Future interface:
+ class FutureBase{
+ public:
+ virtual ~FutureBase() = default;
+ virtual void wait() = 0;
+ virtual bool ready() const = 0;
+ virtual bool valid() const = 0;
+ virtual T get() = 0;
+ };
+
+ // model class
+ template<class F>
+ class FutureModel
+ : public FutureBase
+ {
+ F _future;
+ public:
+ FutureModel(F&& f)
+ : _future(std::forward<F>(f))
+ {}
+
+ virtual void wait() override
+ {
+ _future.wait();
+ }
+
+ virtual bool ready() const override
+ {
+ return _future.ready();
+ }
+
+ virtual bool valid() const override
+ {
+ return _future.valid();
+ }
+
+ virtual T get() override{
+ return (T)_future.get();
+ }
+ };
+
+ std::unique_ptr<FutureBase> _future;
+ public:
+ template<class F>
+ Future(F&& f)
+ : _future(std::make_unique<FutureModel<F>>(std::forward<F>(f)))
+ {}
+
+ template<class U, std::enable_if_t<std::is_same<U,T>::value && !std::is_same<T,void>::value>>
+ Future(U&& data)
+ : _future(std::make_unique<FutureModel<PseudoFuture<T>>>(PseudoFuture<T>(std::forward<U>(data))))
+ {}
+
+ Future() = default;
+
+ /*! \brief wait until the future is ready
+ \throws InvalidFutureException
+ */
+ void wait(){
+ _future->wait();
+ }
+
+ /*! \brief Waits until the future is ready and returns the resulting value
+ \returns The contained value
+ \throws InvalidFutureException
+ */
+ T get() {
+ return _future->get();
+ }
+
+ /*! \brief
+ \returns true if the future is ready, otherwise false
+ \throws InvalidFutureException
+ */
+ bool ready() const {
+ return _future->ready();
+ }
+
+ /*! \brief Checks whether the future is valid, i.e. `get()` was not called
+ on that future, it was not default-constructed, and it was not moved
+ from.
+ \returns true if the future is valid, otherwise false
+ */
+ bool valid() const {
+ if(_future)
+ return _future->valid();
+ return false;
+ }
+ };
+
+ /*! \brief A wrapper class for an object which is ready immediately.
+ */
+ template<class T>
+ class PseudoFuture{
+ bool valid_;
+ T data_;
+ public:
+ PseudoFuture() :
+ valid_(false)
+ {}
+
+ template<class U>
+ PseudoFuture(U&& u) :
+ valid_(true),
+ data_(std::forward<U>(u))
+ {}
+
+ void wait() {
+ if(!valid_)
+ DUNE_THROW(InvalidFutureException, "The PseudoFuture is not valid");
+ }
+
+ bool ready() const {
+ if(!valid_)
+ DUNE_THROW(InvalidFutureException, "The PseudoFuture is not valid");
+ return true;
+ }
+
+ T get() {
+ if(!valid_)
+ DUNE_THROW(InvalidFutureException, "The PseudoFuture is not valid");
+ valid_ = false;
+ return std::forward<T>(data_);
+ }
+
+ bool valid() const {
+ return valid_;
+ }
+ };
+
+ template<>
+ class PseudoFuture<void>{
+ bool valid_;
+ public:
+ PseudoFuture(bool valid = false) :
+ valid_(valid)
+ {}
+
+ void wait(){
+ if(!valid_)
+ DUNE_THROW(InvalidFutureException, "The PseudoFuture is not valid");
+ }
+ bool ready() const{
+ if(!valid_)
+ DUNE_THROW(InvalidFutureException, "The PseudoFuture is not valid");
+ return true;
+ }
+
+ void get(){
+ if(!valid_)
+ DUNE_THROW(InvalidFutureException, "The PseudoFuture is not valid");
+ valid_ = false;
+ }
+
+ bool valid() const{
+ return valid_;
+ }
+ };
+}
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_INDEXSET_HH
+#define DUNE_INDEXSET_HH
+
+#include <algorithm>
+#include <dune/common/arraylist.hh>
+#include <dune/common/exceptions.hh>
+#include <iostream>
+
+#include "localindex.hh"
+
+#include <stdint.h> // for uint32_t
+
+namespace Dune
+{
+ /** @addtogroup Common_Parallel
+ *
+ * @{
+ */
+ /**
+ * @file
+ * @brief Provides a map between global and local indices.
+ * @author Markus Blatt
+ */
+ // forward declarations
+
+ template<class TG, class TL>
+ class IndexPair;
+
+ /**
+ * @brief Print an index pair.
+ * @param os The output stream to print to.
+ * @param pair The index pair to print.
+ */
+ template<class TG, class TL>
+ std::ostream& operator<<(std::ostream& os, const IndexPair<TG,TL>& pair);
+
+ template<class TG, class TL>
+ bool operator==(const IndexPair<TG,TL>&, const IndexPair<TG,TL>&);
+
+ template<class TG, class TL>
+ bool operator!=(const IndexPair<TG,TL>&, const IndexPair<TG,TL>&);
+
+ template<class TG, class TL>
+ bool operator<(const IndexPair<TG,TL>&, const IndexPair<TG,TL>&);
+
+ template<class TG, class TL>
+ bool operator>(const IndexPair<TG,TL>&, const IndexPair<TG,TL>&);
+
+ template<class TG, class TL>
+ bool operator<=(const IndexPair<TG,TL>&, const IndexPair<TG,TL>&);
+
+ template<class TG, class TL>
+ bool operator >=(const IndexPair<TG,TL>&, const IndexPair<TG,TL>&);
+
+ template<class TG, class TL>
+ bool operator==(const IndexPair<TG,TL>&, const TG&);
+
+ template<class TG, class TL>
+ bool operator!=(const IndexPair<TG,TL>&, const TG&);
+
+ template<class TG, class TL>
+ bool operator<(const IndexPair<TG,TL>&, const TG&);
+
+ template<class TG, class TL>
+ bool operator>(const IndexPair<TG,TL>&, const TG&);
+
+ template<class TG, class TL>
+ bool operator<=(const IndexPair<TG,TL>&, const TG&);
+
+ template<class TG, class TL>
+ bool operator >=(const IndexPair<TG,TL>&, const TG&);
+
+ template<typename T>
+ struct MPITraits;
+
+ /**
+ * @brief A pair consisting of a global and local index.
+ */
+ template<class TG, class TL>
+ class IndexPair
+ {
+ friend std::ostream& operator<<<>(std::ostream&, const IndexPair<TG,TL>&);
+ friend bool operator==<>(const IndexPair<TG,TL>&, const IndexPair<TG,TL>&);
+ friend bool operator!=<>(const IndexPair<TG,TL>&, const IndexPair<TG,TL>&);
+ friend bool operator< <>(const IndexPair<TG,TL>&, const IndexPair<TG,TL>&);
+ friend bool operator><>(const IndexPair<TG,TL>&, const IndexPair<TG,TL>&);
+ friend bool operator<=<>(const IndexPair<TG,TL>&, const IndexPair<TG,TL>&);
+ friend bool operator>=<>(const IndexPair<TG,TL>&, const IndexPair<TG,TL>&);
+ friend bool operator==<>(const IndexPair<TG,TL>&, const TG &);
+ friend bool operator!=<>(const IndexPair<TG,TL>&, const TG &);
+ friend bool operator< <>(const IndexPair<TG,TL>&, const TG &);
+ friend bool operator> <>(const IndexPair<TG,TL>&, const TG &);
+ friend bool operator<=<>(const IndexPair<TG,TL>&, const TG &);
+ friend bool operator>=<>(const IndexPair<TG,TL>&, const TG &);
+ friend struct MPITraits<IndexPair<TG,TL> >;
+
+ public:
+ /**
+ * @brief the type of the global index.
+ *
+ * This type has to provide at least an operator< for sorting.
+ */
+ typedef TG GlobalIndex;
+
+ /**
+ * @brief the type of the local index.
+ *
+ * This class has to provide the following functions:
+ * \code
+ * LocalIndex operator=(int);
+ * operator int() const;
+ * LocalIndexState state() const;
+ * void setState(LocalIndexState);
+ * \endcode
+ */
+ typedef TL LocalIndex;
+
+ /**
+ * @brief Constructs a new Pair.
+ *
+ * @param global The global index.
+ * @param local The local index.
+ */
+ IndexPair(const GlobalIndex& global, const LocalIndex& local);
+
+ /**
+ * @brief Construct a new Pair.
+ */
+ IndexPair();
+ /**
+ * @brief Constructs a new Pair.
+ *
+ * The local index will be 0.
+ * @param global The global index.
+ */
+ IndexPair(const GlobalIndex& global);
+
+ /**
+ * @brief Get the global index.
+ *
+ * @return The global index.
+ */
+ inline const GlobalIndex& global() const;
+
+ /**
+ * @brief Get the local index.
+ *
+ * @return The local index.
+ */
+ inline LocalIndex& local();
+
+ /**
+ * @brief Get the local index.
+ *
+ * @return The local index.
+ */
+ inline const LocalIndex& local() const;
+
+ /**
+ * @brief Set the local index.
+ *
+ * @param index The index to set it to.
+ */
+ inline void setLocal(int index);
+ private:
+ /** @brief The global index. */
+ GlobalIndex global_;
+ /** @brief The local index. */
+ LocalIndex local_;
+ };
+
+ /**
+ * @brief The states the index set can be in.
+ * @see ParallelIndexSet::state_
+ */
+ enum ParallelIndexSetState
+ {
+ /**
+ * @brief The default mode.
+ * Indicates that the index set is ready to be used.
+ */
+ GROUND,
+ /**
+ * @brief Indicates that the index set is currently being resized.
+ */
+ RESIZE
+ /**
+ * @brief Indicates that all previously deleted indices are now deleted.
+ *
+ CLEAN,
+ **
+ * @brief Indicates that the index set is currently being reordered.
+ *
+ REORDER
+ */
+ };
+
+ /**
+ * @brief Exception indicating that the index set is not in the expected state.
+ */
+ class InvalidIndexSetState : public InvalidStateException {};
+
+ // Forward declaration
+ template<class I> class GlobalLookupIndexSet;
+
+ /**
+ * @brief Manager class for the mapping between local indices and globally unique indices.
+ *
+ * The mapping is between a globally unique id and a local index. The local index is
+ * consecutive and non-persistent, while the global id might not be consecutive but is
+ * persistent.
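+ *
+ * A short usage sketch (the attribute enum `GridFlag` with the values `owner` and
+ * `overlap`, and the class ParallelLocalIndex from plocalindex.hh, are assumptions
+ * of this example and not part of this header):
+ * \code
+ * Dune::ParallelIndexSet<int, Dune::ParallelLocalIndex<GridFlag> > indexSet;
+ * indexSet.beginResize();
+ * // add pairs of (global index, local index with attribute)
+ * indexSet.add(42, Dune::ParallelLocalIndex<GridFlag>(0, owner, true));
+ * indexSet.add(43, Dune::ParallelLocalIndex<GridFlag>(1, overlap, false));
+ * indexSet.endResize();   // sorts the pairs by global index
+ * \endcode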
+ */
+ template<typename TG, typename TL, int N=100>
+ class ParallelIndexSet
+ {
+ friend class GlobalLookupIndexSet<ParallelIndexSet<TG,TL,N> >;
+
+ public:
+ /**
+ * @brief the type of the global index.
+ * This type has to provide at least an operator< for sorting.
+ */
+ typedef TG GlobalIndex;
+
+ /**
+ * @brief The type of the local index, e.g. ParallelLocalIndex.
+ *
+ * This class has to provide the following functions:
+ * \code
+ * LocalIndex operator=(int);
+ * operator int() const;
+ * LocalIndexState state() const;
+ * void setState(LocalIndexState);
+ * \endcode
+ */
+ typedef TL LocalIndex;
+
+ /**
+ * @brief The type of the pair stored.
+ */
+ typedef Dune::IndexPair<GlobalIndex,LocalIndex> IndexPair;
+
+ enum {
+ /**
+ * @brief The size of the individual arrays in the underlying ArrayList.
+ *
+ * The default value is 100.
+ * @see ArrayList::size
+ */
+ arraySize= (N>0) ? N : 1
+ };
+
+ /** @brief The iterator over the pairs. */
+ class iterator :
+ public ArrayList<IndexPair,N>::iterator
+ {
+ typedef typename ArrayList<IndexPair,N>::iterator
+ Father;
+ friend class ParallelIndexSet<GlobalIndex,LocalIndex,N>;
+ public:
+ iterator(ParallelIndexSet<TG,TL,N>& indexSet, const Father& father)
+ : Father(father), indexSet_(&indexSet)
+ {}
+
+ private:
+ /**
+ * @brief Mark the index as deleted.
+ *
+ * The deleted flag will be set in the local index.
+ * The index will be removed in the endResize method of the
+ * index set.
+ *
+ * @exception InvalidIndexSetState Thrown only if NDEBUG is not defined.
+ */
+ inline void markAsDeleted() const
+ {
+#ifndef NDEBUG
+ if(indexSet_->state_ != RESIZE)
+ DUNE_THROW(InvalidIndexSetState, "Indices can only be removed "
+ <<"while in RESIZE state!");
+#endif
+ Father::operator*().local().setState(DELETED);
+ }
+
+ /** @brief The index set we are an iterator of. */
+ ParallelIndexSet<TG,TL,N>* indexSet_;
+
+ };
+
+
+
+ /** @brief The constant iterator over the pairs. */
+ typedef typename
+ ArrayList<IndexPair,N>::const_iterator
+ const_iterator;
+
+ /**
+ * @brief Constructor.
+ */
+ ParallelIndexSet();
+
+ /**
+ * @brief Get the state the index set is in.
+ * @return The state of the index set.
+ */
+ inline const ParallelIndexSetState& state()
+ {
+ return state_;
+ }
+
+ /**
+ * @brief Indicate that the index set is to be resized.
+ * @exception InvalidState If index set was not in
+ * ParallelIndexSetState::GROUND mode.
+ */
+ void beginResize();
+
+ /**
+ * @brief Add a new index to the set.
+ *
+ * The local index is created by the default constructor.
+ * @param global The globally unique id of the index.
+ * @exception InvalidState If index set is not in
+ * ParallelIndexSetState::RESIZE mode.
+ */
+ inline void add(const GlobalIndex& global);
+
+ /**
+ * @brief Add a new index to the set.
+ *
+ * @param global The globally unique id of the index.
+ * @param local The local index.
+ * @exception InvalidState If index set is not in
+ * ParallelIndexSetState::RESIZE mode.
+ */
+ inline void add(const GlobalIndex& global, const LocalIndex& local);
+
+ /**
+ * @brief Mark an index as deleted.
+ *
+ * The index will be deleted during endResize().
+ * @param position An iterator at the position we want to delete.
+ * @exception InvalidState If index set is not in ParallelIndexSetState::RESIZE mode.
+ */
+ inline void markAsDeleted(const iterator& position);
+
+ /**
+ * @brief Indicate that the resizing finishes.
+ *
+ * @warning Invalidates all pointers stored to the elements of this index set.
+ * The local indices will be ordered
+ * according to the global indices:
+ * Let \f$(g_i,l_i)_{i=0}^N \f$ be the set of all indices then \f$l_i < l_j\f$
+ * if and
+ * only if \f$g_i < g_j\f$ for arbitrary \f$i \neq j\f$.
+ * @exception InvalidState If index set was not in
+ * ParallelIndexSetState::RESIZE mode.
+ */
+ void endResize();
+
+ /**
+ * @brief Find the index pair with a specific global id.
+ *
+ * This starts a binary search for the entry and therefore has complexity
+ * log(N).
+ * @param global The globally unique id of the pair.
+ * @return The pair of indices for the id.
+ * @warning If the global index is not in the set a wrong or even a
+ * null reference might be returned. To be safe, use the throwing alternative at().
+ */
+ inline IndexPair&
+ operator[](const GlobalIndex& global);
+
+ /**
+ * @brief Find the index pair with a specific global id.
+ *
+ * This starts a binary search for the entry and therefore has complexity
+ * log(N).
+ * @param global The globally unique id of the pair.
+ * @return The pair of indices for the id.
+ * @exception RangeError Thrown if the global id is not known.
+ */
+ inline IndexPair&
+ at(const GlobalIndex& global);
+
+ /**
+ * @brief Check whether an index pair with a specific global id exists.
+ *
+ * This starts a binary search for the entry and therefore has complexity
+ * log(N).
+ * @param global The globally unique id of the pair.
+ * @return True if an index pair with this global id exists, false otherwise.
+ */
+ inline bool
+ exists (const GlobalIndex& global) const;
+
+ /**
+ * @brief Find the index pair with a specific global id.
+ *
+ * This starts a binary search for the entry and therefore has complexity
+ * log(N).
+ * @param global The globally unique id of the pair.
+ * @return The pair of indices for the id.
+ * @warning If the global index is not in the set a wrong or even a
+ * null reference might be returned. To be safe, use the throwing alternative at().
+ */
+ inline const IndexPair&
+ operator[](const GlobalIndex& global) const;
+
+ /**
+ * @brief Find the index pair with a specific global id.
+ *
+ * This starts a binary search for the entry and therefore has complexity
+ * log(N).
+ * @param global The globally unique id of the pair.
+ * @return The pair of indices for the id.
+ * @exception RangeError Thrown if the global id is not known.
+ */
+ inline const IndexPair&
+ at(const GlobalIndex& global) const;
+
+ /**
+ * @brief Get an iterator over the indices positioned at the first index.
+ * @return Iterator over the local indices.
+ */
+ inline iterator begin();
+
+ /**
+ * @brief Get an iterator over the indices positioned after the last index.
+ * @return Iterator over the local indices.
+ */
+ inline iterator end();
+
+ /**
+ * @brief Get an iterator over the indices positioned at the first index.
+ * @return Iterator over the local indices.
+ */
+ inline const_iterator begin() const;
+
+ /**
+ * @brief Get an iterator over the indices positioned after the last index.
+ * @return Iterator over the local indices.
+ */
+ inline const_iterator end() const;
+
+ /**
+ * @brief Renumbers the local index numbers.
+ *
+ * After this function returns the indices are
+ * consecutively numbered beginning from 0. Let
+ * \f$(g_i,l_i)\f$, \f$(g_j,l_j)\f$ be two arbitrary index
+ * pairs with \f$g_i<g_j\f$; then after renumbering
+ * \f$l_i<l_j\f$ will hold.
+ */
+ inline void renumberLocal();
+
+ /**
+ * @brief Get the internal sequence number.
+ *
+ * Is initially 0 is incremented for each resize.
+ * @return The sequence number.
+ */
+ inline int seqNo() const;
+
+ /**
+ * @brief Get the total number of (public and nonpublic) indices.
+ * @return The total number of (public and nonpublic) indices.
+ */
+ inline size_t size() const;
+
+ private:
+ /** @brief The index pairs. */
+ ArrayList<IndexPair,N> localIndices_;
+ /** @brief The new indices for the RESIZE state. */
+ ArrayList<IndexPair,N> newIndices_;
+ /** @brief The state of the index set. */
+ ParallelIndexSetState state_;
+ /** @brief Number to keep track of the number of resizes. */
+ int seqNo_;
+ /** @brief Whether entries were deleted in resize mode. */
+ bool deletedEntries_;
+ /**
+ * @brief Merges the localIndices_ and newIndices_ arrays and creates a new
+ * localIndices_ array.
+ */
+ inline void merge();
+ };
+
+
+ /**
+ * @brief Print an index set.
+ * @param os The output stream to print to.
+ * @param indexSet The index set to print.
+ */
+ template<class TG, class TL, int N>
+ std::ostream& operator<<(std::ostream& os, const ParallelIndexSet<TG,TL,N>& indexSet);
+
+ /**
+ * @brief Decorates an index set with the possibility to find a global index
+ * that is mapped to a specific local index.
+ *
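+ * A short sketch (assuming `indexSet` is an already filled ParallelIndexSet):
+ * \code
+ * Dune::GlobalLookupIndexSet<decltype(indexSet)> lookup(indexSet);
+ * if(const auto* p = lookup.pair(0))      // index pair stored for local index 0
+ *   std::cout << p->global() << std::endl;
+ * \endcode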
+ */
+ template<class I>
+ class GlobalLookupIndexSet
+ {
+ public:
+ /**
+ * @brief The type of the index set.
+ */
+ typedef I ParallelIndexSet;
+
+ /**
+ * @brief The type of the local index.
+ */
+ typedef typename ParallelIndexSet::LocalIndex LocalIndex;
+
+ /**
+ * @brief The type of the global index.
+ */
+ typedef typename ParallelIndexSet::GlobalIndex GlobalIndex;
+
+ /**
+ * @brief The iterator over the index pairs.
+ */
+ typedef typename ParallelIndexSet::const_iterator const_iterator;
+
+ typedef Dune::IndexPair<typename I::GlobalIndex, typename I::LocalIndex> IndexPair;
+
+ /**
+ * @brief Constructor.
+ * @param indexset The index set in which we want to look up the corresponding
+ * global index of a local index.
+ * @param size The number of indices present, i.e. one more than the maximum local index.
+ */
+ GlobalLookupIndexSet(const ParallelIndexSet& indexset, std::size_t size);
+
+ /**
+ * @brief Constructor.
+ * @param indexset The index set in which we want to look up the corresponding
+ * global index of a local index.
+ */
+ GlobalLookupIndexSet(const ParallelIndexSet& indexset);
+
+ /**
+ * @brief Destructor.
+ */
+ ~GlobalLookupIndexSet();
+
+ /**
+ * @brief Find the index pair with a specific global id.
+ *
+ * This starts a binary search for the entry and therefore has complexity
+ * log(N). This method is forwarded to the underlying index set.
+ * @param global The globally unique id of the pair.
+ * @return The pair of indices for the id.
+ * @exception RangeError Thrown if the global id is not known.
+ */
+ inline const IndexPair&
+ operator[](const GlobalIndex& global) const;
+
+ /**
+ * @brief Get the index pair corresponding to a local index.
+ */
+ inline const IndexPair*
+ pair(const std::size_t& local) const;
+
+ /**
+ * @brief Get an iterator over the indices positioned at the first index.
+ * @return Iterator over the local indices.
+ */
+ inline const_iterator begin() const;
+
+ /**
+ * @brief Get an iterator over the indices positioned after the last index.
+ * @return Iterator over the local indices.
+ */
+ inline const_iterator end() const;
+
+ /**
+ * @brief Get the internal sequence number.
+ *
+ * It is initially 0 and is incremented with each resize.
+ * @return The sequence number.
+ */
+ inline int seqNo() const;
+
+ /**
+ * @brief Get the total number of (public and nonpublic) indices.
+ * @return The total number of (public and nonpublic) indices.
+ */
+ inline size_t size() const;
+ private:
+ /**
+ * @brief The index set we lookup in.
+ */
+ const ParallelIndexSet& indexSet_;
+
+ /**
+ * @brief The number of indices.
+ */
+ std::size_t size_;
+
+ /**
+ * @brief Array with the positions of the corresponding index pair of the index set.
+ */
+ std::vector<const IndexPair*> indices_;
+
+ };
+
+
+ template<typename T>
+ struct LocalIndexComparator
+ {
+ static bool compare([[maybe_unused]] const T& t1, [[maybe_unused]] const T& t2)
+ {
+ return false;
+ }
+ };
+
+ template<class TG, class TL>
+ struct IndexSetSortFunctor
+ {
+ bool operator()(const IndexPair<TG,TL>& i1, const IndexPair<TG,TL>& i2)
+ {
+ return i1.global()<i2.global() || (i1.global()==i2.global() &&
+ LocalIndexComparator<TL>::compare(i1.local(),
+ i2.local()));
+ }
+ };
+
+
+
+ template<class TG, class TL>
+ inline std::ostream& operator<<(std::ostream& os, const IndexPair<TG,TL>& pair)
+ {
+ os<<"{global="<<pair.global_<<", local="<<pair.local_<<"}";
+ return os;
+ }
+
+ template<class TG, class TL, int N>
+ inline std::ostream& operator<<(std::ostream& os, const ParallelIndexSet<TG,TL,N>& indexSet)
+ {
+ typedef typename ParallelIndexSet<TG,TL,N>::const_iterator Iterator;
+ Iterator end = indexSet.end();
+ os<<"{";
+ for(Iterator index = indexSet.begin(); index != end; ++index)
+ os<<*index<<" ";
+ os<<"}";
+ return os;
+
+ }
+
+ template<class TG, class TL>
+ inline bool operator==(const IndexPair<TG,TL>& a, const IndexPair<TG,TL>& b)
+ {
+ return a.global_==b.global_;
+ }
+
+ template<class TG, class TL>
+ inline bool operator!=(const IndexPair<TG,TL>& a, const IndexPair<TG,TL>& b)
+ {
+ return a.global_!=b.global_;
+ }
+
+ template<class TG, class TL>
+ inline bool operator<(const IndexPair<TG,TL>& a, const IndexPair<TG,TL>& b)
+ {
+ return a.global_<b.global_;
+ }
+
+ template<class TG, class TL>
+ inline bool operator>(const IndexPair<TG,TL>& a, const IndexPair<TG,TL>& b)
+ {
+ return a.global_>b.global_;
+ }
+
+ template<class TG, class TL>
+ inline bool operator<=(const IndexPair<TG,TL>& a, const IndexPair<TG,TL>& b)
+ {
+ return a.global_<=b.global_;
+ }
+
+ template<class TG, class TL>
+ inline bool operator >=(const IndexPair<TG,TL>& a, const IndexPair<TG,TL>& b)
+ {
+ return a.global_>=b.global_;
+ }
+
+ template<class TG, class TL>
+ inline bool operator==(const IndexPair<TG,TL>& a, const TG& b)
+ {
+ return a.global_==b;
+ }
+
+ template<class TG, class TL>
+ inline bool operator!=(const IndexPair<TG,TL>& a, const TG& b)
+ {
+ return a.global_!=b;
+ }
+
+ template<class TG, class TL>
+ inline bool operator<(const IndexPair<TG,TL>& a, const TG& b)
+ {
+ return a.global_<b;
+ }
+
+ template<class TG, class TL>
+ inline bool operator>(const IndexPair<TG,TL>& a, const TG& b)
+ {
+ return a.global_>b;
+ }
+
+ template<class TG, class TL>
+ inline bool operator<=(const IndexPair<TG,TL>& a, const TG& b)
+ {
+ return a.global_<=b;
+ }
+
+ template<class TG, class TL>
+ inline bool operator >=(const IndexPair<TG,TL>& a, const TG& b)
+ {
+ return a.global_>=b;
+ }
+
+#ifndef DOXYGEN
+
+ template<class TG, class TL>
+ IndexPair<TG,TL>::IndexPair(const TG& global, const TL& local)
+ : global_(global), local_(local){}
+
+ template<class TG, class TL>
+ IndexPair<TG,TL>::IndexPair(const TG& global)
+ : global_(global), local_(){}
+
+ template<class TG, class TL>
+ IndexPair<TG,TL>::IndexPair()
+ : global_(), local_(){}
+
+ template<class TG, class TL>
+ inline const TG& IndexPair<TG,TL>::global() const {
+ return global_;
+ }
+
+ template<class TG, class TL>
+ inline TL& IndexPair<TG,TL>::local() {
+ return local_;
+ }
+
+ template<class TG, class TL>
+ inline const TL& IndexPair<TG,TL>::local() const {
+ return local_;
+ }
+
+ template<class TG, class TL>
+ inline void IndexPair<TG,TL>::setLocal(int local){
+ local_=local;
+ }
+
+ template<class TG, class TL, int N>
+ ParallelIndexSet<TG,TL,N>::ParallelIndexSet()
+ : state_(GROUND), seqNo_(0)
+ {}
+
+ template<class TG, class TL, int N>
+ void ParallelIndexSet<TG,TL,N>::beginResize()
+ {
+
+ // Checks in unproductive code
+#ifndef NDEBUG
+ if(state_!=GROUND)
+ DUNE_THROW(InvalidIndexSetState,
+ "IndexSet has to be in GROUND state, when "
+ << "beginResize() is called!");
+#endif
+
+ state_ = RESIZE;
+ deletedEntries_ = false;
+ }
+
+ template<class TG, class TL, int N>
+ inline void ParallelIndexSet<TG,TL,N>::add(const GlobalIndex& global)
+ {
+ // Checks in unproductive code
+#ifndef NDEBUG
+ if(state_ != RESIZE)
+ DUNE_THROW(InvalidIndexSetState, "Indices can only be added "
+ <<"while in RESIZE state!");
+#endif
+ newIndices_.push_back(IndexPair(global));
+ }
+
+ template<class TG, class TL, int N>
+ inline void ParallelIndexSet<TG,TL,N>::add(const TG& global, const TL& local)
+ {
+ // Checks in unproductive code
+#ifndef NDEBUG
+ if(state_ != RESIZE)
+ DUNE_THROW(InvalidIndexSetState, "Indices can only be added "
+ <<"while in RESIZE state!");
+#endif
+ newIndices_.push_back(IndexPair(global,local));
+ }
+
+ template<class TG, class TL, int N>
+ inline void ParallelIndexSet<TG,TL,N>::markAsDeleted(const iterator& global)
+ {
+ // Checks in unproductive code
+#ifndef NDEBUG
+ if(state_ != RESIZE)
+ DUNE_THROW(InvalidIndexSetState, "Indices can only be removed "
+ <<"while in RESIZE state!");
+#endif
+ deletedEntries_ = true;
+
+ global.markAsDeleted();
+ }
+
+ template<class TG, class TL, int N>
+ void ParallelIndexSet<TG,TL,N>::endResize() {
+ // Checks in unproductive code
+#ifndef NDEBUG
+ if(state_ != RESIZE)
+ DUNE_THROW(InvalidIndexSetState, "endResize called while not "
+ <<"in RESIZE state!");
+#endif
+
+ std::sort(newIndices_.begin(), newIndices_.end(), IndexSetSortFunctor<TG,TL>());
+ merge();
+ seqNo_++;
+ state_ = GROUND;
+ }
+
+
+ template<class TG, class TL, int N>
+ inline void ParallelIndexSet<TG,TL,N>::merge(){
+ if(localIndices_.size()==0)
+ {
+ localIndices_=newIndices_;
+ newIndices_.clear();
+ }
+ else if(newIndices_.size()>0 || deletedEntries_)
+ {
+ ArrayList<IndexPair,N> tempPairs;
+
+ auto old = localIndices_.begin();
+ auto added = newIndices_.begin();
+ const auto endold = localIndices_.end();
+ const auto endadded = newIndices_.end();
+
+ while(old != endold && added!= endadded)
+ {
+ if(old->local().state()==DELETED) {
+ old.eraseToHere();
+ }
+ else
+ {
+ if(old->global() < added->global() ||
+ (old->global() == added->global()
+ && LocalIndexComparator<TL>::compare(old->local(),added->local())))
+ {
+ tempPairs.push_back(*old);
+ old.eraseToHere();
+ continue;
+ }else
+ {
+ tempPairs.push_back(*added);
+ added.eraseToHere();
+ }
+ }
+ }
+
+ while(old != endold)
+ {
+ if(old->local().state()!=DELETED) {
+ tempPairs.push_back(*old);
+ }
+ old.eraseToHere();
+ }
+
+ while(added!= endadded)
+ {
+ tempPairs.push_back(*added);
+ added.eraseToHere();
+ }
+ localIndices_ = tempPairs;
+ }
+ }
+
+
+ template<class TG, class TL, int N>
+ inline const IndexPair<TG,TL>&
+ ParallelIndexSet<TG,TL,N>::at(const TG& global) const
+ {
+ // perform a binary search
+ int low=0, high=localIndices_.size()-1, probe=-1;
+
+ while(low<high)
+ {
+ probe = (high + low) / 2;
+ if(global <= localIndices_[probe].global())
+ high = probe;
+ else
+ low = probe+1;
+ }
+
+ if(probe==-1)
+ DUNE_THROW(RangeError, "No entries!");
+
+ if( localIndices_[low].global() != global)
+ DUNE_THROW(RangeError, "Could not find entry of "<<global);
+ else
+ return localIndices_[low];
+ }
+
+ template<class TG, class TL, int N>
+ inline const IndexPair<TG,TL>&
+ ParallelIndexSet<TG,TL,N>::operator[](const TG& global) const
+ {
+ // perform a binary search
+ int low=0, high=localIndices_.size()-1, probe=-1;
+
+ while(low<high)
+ {
+ probe = (high + low) / 2;
+ if(global <= localIndices_[probe].global())
+ high = probe;
+ else
+ low = probe+1;
+ }
+
+ return localIndices_[low];
+ }
+ template<class TG, class TL, int N>
+ inline IndexPair<TG,TL>& ParallelIndexSet<TG,TL,N>::at(const TG& global)
+ {
+ // perform a binary search
+ int low=0, high=localIndices_.size()-1, probe=-1;
+
+ while(low<high)
+ {
+ probe = (high + low) / 2;
+ if(localIndices_[probe].global() >= global)
+ high = probe;
+ else
+ low = probe+1;
+ }
+
+ if(probe==-1)
+ DUNE_THROW(RangeError, "No entries!");
+
+ if( localIndices_[low].global() != global)
+ DUNE_THROW(RangeError, "Could not find entry of "<<global);
+ else
+ return localIndices_[low];
+ }
+
+ template<class TG, class TL, int N>
+ inline bool ParallelIndexSet<TG,TL,N>::exists (const TG& global) const
+ {
+ // perform a binary search
+ int low=0, high=localIndices_.size()-1, probe=-1;
+
+ while(low<high)
+ {
+ probe = (high + low) / 2;
+ if(localIndices_[probe].global() >= global)
+ high = probe;
+ else
+ low = probe+1;
+ }
+
+ if(probe==-1)
+ return false;
+
+ if( localIndices_[low].global() != global)
+ return false;
+ return true;
+ }
+
+ template<class TG, class TL, int N>
+ inline IndexPair<TG,TL>& ParallelIndexSet<TG,TL,N>::operator[](const TG& global)
+ {
+ // perform a binary search
+ int low=0, high=localIndices_.size()-1, probe=-1;
+
+ while(low<high)
+ {
+ probe = (high + low) / 2;
+ if(localIndices_[probe].global() >= global)
+ high = probe;
+ else
+ low = probe+1;
+ }
+
+ return localIndices_[low];
+ }
+ template<class TG, class TL, int N>
+ inline typename ParallelIndexSet<TG,TL,N>::iterator
+ ParallelIndexSet<TG,TL,N>::begin()
+ {
+ return iterator(*this, localIndices_.begin());
+ }
+
+
+ template<class TG, class TL, int N>
+ inline typename ParallelIndexSet<TG,TL,N>::iterator
+ ParallelIndexSet<TG,TL,N>::end()
+ {
+ return iterator(*this,localIndices_.end());
+ }
+
+ template<class TG, class TL, int N>
+ inline typename ParallelIndexSet<TG,TL,N>::const_iterator
+ ParallelIndexSet<TG,TL,N>::begin() const
+ {
+ return localIndices_.begin();
+ }
+
+
+ template<class TG, class TL, int N>
+ inline typename ParallelIndexSet<TG,TL,N>::const_iterator
+ ParallelIndexSet<TG,TL,N>::end() const
+ {
+ return localIndices_.end();
+ }
+
+ template<class TG, class TL, int N>
+ void ParallelIndexSet<TG,TL,N>::renumberLocal(){
+#ifndef NDEBUG
+ if(state_==RESIZE)
+ DUNE_THROW(InvalidIndexSetState, "IndexSet has to be in "
+ <<"GROUND state for renumberLocal()");
+#endif
+
+ const auto end_ = end();
+ uint32_t index=0;
+
+ for(auto pair=begin(); pair!=end_; index++, ++pair)
+ pair->local()=index;
+ }
+
+ template<class TG, class TL, int N>
+ inline int ParallelIndexSet<TG,TL,N>::seqNo() const
+ {
+ return seqNo_;
+ }
+
+ template<class TG, class TL, int N>
+ inline size_t ParallelIndexSet<TG,TL,N>::size() const
+ {
+ return localIndices_.size();
+ }
+
+ template<class I>
+ GlobalLookupIndexSet<I>::GlobalLookupIndexSet(const I& indexset,
+ std::size_t size)
+ : indexSet_(indexset), size_(size),
+ indices_(size_, static_cast<const IndexPair*>(0))
+ {
+ const_iterator end_ = indexSet_.end();
+
+ for(const_iterator pair = indexSet_.begin(); pair!=end_; ++pair) {
+ assert(pair->local()<size_);
+ indices_[pair->local()] = &(*pair);
+ }
+ }
+
+ template<class I>
+ GlobalLookupIndexSet<I>::GlobalLookupIndexSet(const I& indexset)
+ : indexSet_(indexset), size_(0)
+ {
+ const_iterator end_ = indexSet_.end();
+ for(const_iterator pair = indexSet_.begin(); pair!=end_; ++pair)
+ size_=std::max(size_,static_cast<std::size_t>(pair->local()));
+
+ indices_.resize(++size_, 0);
+
+ for(const_iterator pair = indexSet_.begin(); pair!=end_; ++pair)
+ indices_[pair->local()] = &(*pair);
+ }
+
+ template<class I>
+ GlobalLookupIndexSet<I>::~GlobalLookupIndexSet()
+ {}
+
+ template<class I>
+ inline const IndexPair<typename I::GlobalIndex, typename I::LocalIndex>*
+ GlobalLookupIndexSet<I>::pair(const std::size_t& local) const
+ {
+ return indices_[local];
+ }
+
+ template<class I>
+ inline const IndexPair<typename I::GlobalIndex, typename I::LocalIndex>&
+ GlobalLookupIndexSet<I>::operator[](const GlobalIndex& global) const
+ {
+ return indexSet_[global];
+ }
+
+ template<class I>
+ typename I::const_iterator GlobalLookupIndexSet<I>::begin() const
+ {
+ return indexSet_.begin();
+ }
+
+ template<class I>
+ typename I::const_iterator GlobalLookupIndexSet<I>::end() const
+ {
+ return indexSet_.end();
+ }
+
+ template<class I>
+ inline size_t GlobalLookupIndexSet<I>::size() const
+ {
+ return size_;
+ }
+
+ template<class I>
+ inline int GlobalLookupIndexSet<I>::seqNo() const
+ {
+ return indexSet_.seqNo();
+ }
+
+ template<typename TG, typename TL, int N, typename TG1, typename TL1, int N1>
+ bool operator==(const ParallelIndexSet<TG,TL,N>& idxset,
+ const ParallelIndexSet<TG1,TL1,N1>& idxset1)
+ {
+ if(idxset.size()!=idxset1.size())
+ return false;
+ typedef typename ParallelIndexSet<TG,TL,N>::const_iterator Iter;
+ typedef typename ParallelIndexSet<TG1,TL1,N1>::const_iterator Iter1;
+ Iter iter=idxset.begin();
+ for(Iter1 iter1=idxset1.begin(); iter1 != idxset1.end(); ++iter, ++iter1) {
+ if(iter1->global()!=iter->global())
+ return false;
+ typedef typename ParallelIndexSet<TG,TL,N>::LocalIndex PI;
+ const PI& pi=iter->local(), pi1=iter1->local();
+
+ if(pi!=pi1)
+ return false;
+ }
+ return true;
+ }
+
+ template<typename TG, typename TL, int N, typename TG1, typename TL1, int N1>
+ bool operator!=(const ParallelIndexSet<TG,TL,N>& idxset,
+ const ParallelIndexSet<TG1,TL1,N1>& idxset1)
+ {
+ return !(idxset==idxset1);
+ }
+
+
+#endif // DOXYGEN
+
+}
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_INDICESSYNCER_HH
+#define DUNE_INDICESSYNCER_HH
+
+#include "indexset.hh"
+#include "remoteindices.hh"
+#include <dune/common/stdstreams.hh>
+#include <dune/common/sllist.hh>
+#include <cassert>
+#include <cmath>
+#include <limits>
+#include <algorithm>
+#include <functional>
+#include <map>
+#include <tuple>
+
+#if HAVE_MPI
+namespace Dune
+{
+ /** @addtogroup Common_Parallel
+ *
+ * @{
+ */
+ /**
+ * @file
+ * @brief Class for adding missing indices of a distributed index set in a local
+ * communication.
+ * @author Markus Blatt
+ */
+
+ /**
+ * @brief Class for recomputing missing indices of a distributed index set.
+ *
+ * Missing local and remote indices will be added.
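+ *
+ * A typical call sequence might look like this sketch (assuming `indexSet` and
+ * `remoteIndices` were built beforehand, with the same index set used as source
+ * and target of the remote indices):
+ * \code
+ * Dune::IndicesSyncer<decltype(indexSet)> syncer(indexSet, remoteIndices);
+ * syncer.sync();   // adds the missing indices, using neighbour communication only
+ * \endcode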
+ */
+ template<typename T>
+ class IndicesSyncer
+ {
+ public:
+
+ /** @brief The type of the index set. */
+ typedef T ParallelIndexSet;
+
+ /** @brief The type of the index pair */
+ typedef typename ParallelIndexSet::IndexPair IndexPair;
+
+ /** @brief Type of the global index used in the index set. */
+ typedef typename ParallelIndexSet::GlobalIndex GlobalIndex;
+
+ /** @brief Type of the attribute used in the index set. */
+ typedef typename ParallelIndexSet::LocalIndex::Attribute Attribute;
+
+ /**
+ * @brief Type of the remote indices.
+ */
+ typedef Dune::RemoteIndices<ParallelIndexSet> RemoteIndices;
+
+ /**
+ * @brief Constructor.
+ *
+ * The source as well as the target index set of the remote
+ * indices have to be the same as the provided index set.
+ * @param indexSet The index set with the information
+ * of the locally present indices.
+ * @param remoteIndices The remoteIndices.
+ */
+ IndicesSyncer(ParallelIndexSet& indexSet,
+ RemoteIndices& remoteIndices);
+
+ /**
+ * @brief Sync the index set.
+ *
+ * Computes the missing indices in the local and the remote index list and adds them.
+ * No global communication is necessary!
+ * All indices added to the index set will get the local index
+ * std::numeric_limits<size_t>::max().
+ *
+ */
+ void sync();
+
+ /**
+ * @brief Sync the index set and assign local numbers to new indices.
+ *
+ * Computes the missing indices in the local and the remote index list and adds them.
+ * No global communication is necessary!
+ * @param numberer Functor providing the local indices for the added global indices.
+ * It has to provide a function std::size_t operator()(const TG& global) that returns
+ * the local index for a given global one. It will be called for ascending global indices.
+ *
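+ * A minimal numberer (a sketch; the name and the numbering strategy are arbitrary)
+ * could append the added indices behind the existing local range:
+ * \code
+ * struct AppendNumberer {
+ *   std::size_t next;   // first unused local index
+ *   std::size_t operator()(const GlobalIndex&) { return next++; }
+ * };
+ * AppendNumberer numberer{indexSet.size()};
+ * syncer.sync(numberer);
+ * \endcode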
+ */
+ template<typename T1>
+ void sync(T1& numberer);
+
+ private:
+
+ /** @brief The set of locally present indices.*/
+ ParallelIndexSet& indexSet_;
+
+ /** @brief The remote indices. */
+ RemoteIndices& remoteIndices_;
+
+ /** @brief The send buffers for the neighbour processes. */
+ char** sendBuffers_;
+
+ /** @brief The receive buffer. */
+ char* receiveBuffer_;
+
+ /** @brief The size of the send buffers. */
+ std::size_t* sendBufferSizes_;
+
+ /** @brief The size of the receive buffer in bytes. */
+ int receiveBufferSize_; // int because of MPI
+
+ /**
+ * @brief Information about the messages to send to a neighbouring process.
+ */
+ struct MessageInformation
+ {
+ MessageInformation()
+ : publish(), pairs()
+ {}
+ /** @brief The number of indices we publish for the other process. */
+ int publish;
+ /**
+ * @brief The number of pairs (attribute and process number)
+ * we publish to the neighbour process.
+ */
+ int pairs;
+ };
+
+ /**
+ * @brief Default numberer for sync().
+ */
+ class DefaultNumberer
+ {
+ public:
+ /**
+ * @brief Provide the local index, which is always
+ * std::numeric_limits<size_t>::max().
+ * @param global The global index (ignored).
+ */
+ std::size_t operator()([[maybe_unused]] const GlobalIndex& global)
+ {
+ return std::numeric_limits<size_t>::max();
+ }
+ };
+
+ /** @brief The mpi datatype for the MessageInformation */
+ MPI_Datatype datatype_;
+
+ /** @brief Our rank. */
+ int rank_;
+
+ /**
+ * @brief List type for temporarily storing the global indices of the
+ * remote indices.
+ */
+ typedef SLList<std::pair<GlobalIndex,Attribute>, typename RemoteIndices::Allocator> GlobalIndexList;
+
+ /** @brief The modifying iterator for the global index list. */
+ typedef typename GlobalIndexList::ModifyIterator GlobalIndexModifier;
+
+ /**
+ * @brief The type of the iterator of GlobalIndexList
+ */
+ typedef typename SLList<GlobalIndex, typename RemoteIndices::Allocator>::iterator
+ GlobalIndexIterator;
+
+ /** @brief Type of the map of ranks onto GlobalIndexLists. */
+ typedef std::map<int, GlobalIndexList> GlobalIndicesMap;
+
+ /**
+ * @brief Map of global index lists onto process ranks.
+ *
+ * As the pointers in the remote index lists become invalid due to
+ * resorting the index set entries one has store the corresponding
+ * global index for each remote index. Thus the pointers can be adjusted
+ * properly as a last step.
+ */
+ GlobalIndicesMap globalMap_;
+
+ /**
+ * @brief The type of the single linked list of bools.
+ */
+ typedef SLList<bool, typename RemoteIndices::Allocator> BoolList;
+
+ /**
+ * @brief The mutable iterator of the single linked bool list.
+ */
+ typedef typename BoolList::iterator BoolIterator;
+
+ /** @brief The type of the modifying iterator for the list of bools. */
+ typedef typename BoolList::ModifyIterator BoolListModifier;
+
+ /** @brief The type of the map of bool lists. */
+ typedef std::map<int,BoolList> BoolMap;
+
+ /**
+ * @brief Map of lists of bool indicating whether the remote index was present before
+ * call of sync.
+ */
+ BoolMap oldMap_;
+
+ /** @brief Information about the messages we send. */
+ std::map<int,MessageInformation> infoSend_;
+
+ /** @brief The type of the remote index list. */
+ typedef typename RemoteIndices::RemoteIndexList RemoteIndexList;
+
+ /** @brief The type of the modifying iterator of the remote index list. */
+ typedef typename RemoteIndexList::ModifyIterator RemoteIndexModifier;
+
+ /** @brief The type of the remote index. */
+ typedef Dune::RemoteIndex<GlobalIndex,Attribute> RemoteIndex;
+
+ /** @brief The iterator of the remote index list. */
+ typedef typename RemoteIndexList::iterator RemoteIndexIterator;
+
+ /** @brief The const iterator of the remote index list. */
+ typedef typename RemoteIndexList::const_iterator ConstRemoteIndexIterator;
+
+ /** @brief Type of the tuple of iterators needed for the adding of indices. */
+ typedef std::tuple<RemoteIndexModifier,GlobalIndexModifier,BoolListModifier,
+ const ConstRemoteIndexIterator> IteratorTuple;
+
+ /**
+ * @brief A tuple of iterators.
+ *
+ * Insertion into a singly linked list is only possible after the position an iterator points to.
+ * Therefore, for each linked list two iterators are needed: one positioned before the actual entry
+ * (for insertion) and one positioned at the actual entry (for searching).
+ */
+ class Iterators
+ {
+ friend class IndicesSyncer<T>;
+ public:
+ /**
+ * @brief Constructor.
+ *
+ * Initializes all iterators to the first entry and to the one before the first entry, respectively.
+ * @param remoteIndices The list of the remote indices.
+ * @param globalIndices The list of the corresponding global indices. This is needed because
+ * the pointers to the local index will become invalid due to the merging of the index sets.
+ * @param booleans Whether the remote index was there before the sync process started.
+ */
+ Iterators(RemoteIndexList& remoteIndices, GlobalIndexList& globalIndices,
+ BoolList& booleans);
+
+ /**
+ * @brief Default constructor.
+ */
+ Iterators();
+
+ /**
+ * @brief Increment all iterators.
+ */
+ Iterators& operator++();
+
+ /**
+ * @brief Insert a new remote index to the underlying remote index list.
+ * @param index The remote index.
+ * @param global The global index corresponding to the remote index.
+ */
+ void insert(const RemoteIndex& index,
+ const std::pair<GlobalIndex,Attribute>& global);
+
+ /**
+ * @brief Get the remote index at current position.
+ * @return The current remote index.
+ */
+ RemoteIndex& remoteIndex() const;
+
+ /**
+ * @brief Get the global index of the remote index at current position.
+ * @return The current global index.
+ */
+ std::pair<GlobalIndex,Attribute>& globalIndexPair() const;
+
+ Attribute& attribute() const;
+
+ /**
+ * @brief Was this entry already in the remote index list before the sync process?
+ * @return True if the current index was already in the remote index list
+ * before the sync process.
+ */
+ bool isOld() const;
+
+ /**
+ * @brief Reset all the underlying iterators.
+ *
+ * Position them at the first list entry and the entry before the first entry, respectively.
+ * @param remoteIndices The list of the remote indices.
+ * @param globalIndices The list of the corresponding global indices. This is needed because
+ * the pointers to the local index will become invalid due to the merging of the index sets.
+ * @param booleans Whether the remote index was there before the sync process started.
+ */
+ void reset(RemoteIndexList& remoteIndices, GlobalIndexList& globalIndices,
+ BoolList& booleans);
+
+ /**
+ * @brief Are we not at the end of the list?
+ * @return True if the iterators are not positioned at the end of the list
+ * and the tail of the list respectively.
+ */
+ bool isNotAtEnd() const;
+
+ /**
+ * @brief Are we at the end of the list?
+ * @return True if the iterators are positioned at the end of the list
+ * and the tail of the list respectively.
+ */
+ bool isAtEnd() const;
+
+ private:
+ /**
+ * @brief The iterator tuple.
+ *
+ * The tuple consists of a modifying iterator over the single linked list of remote indices,
+ * one over the single linked list of the corresponding global indices, and one over the
+ * single linked list of bool values, all three initially positioned at the beginning of
+ * their lists, and finally a const iterator over the remote index list positioned at its end.
+ */
+ IteratorTuple iterators_;
+ };
+
+ /** @brief Type of the map from ranks to iterator tuples. */
+ typedef std::map<int,Iterators> IteratorsMap;
+
+ /**
+ * @brief The iterator tuples mapped on the neighbours.
+ *
+ * The key of the map is the rank of the neighbour.
+ * The first entry in the tuple is an iterator over the remote indices
+ * initially positioned before the first entry. The second entry is an
+ * iterator over the corresponding global indices also initially positioned
+ * before the first entry. The third entry is an iterator over the list of booleans
+ * indicating which indices were already known, also initially positioned at the beginning.
+ * The last entry is the iterator over the remote indices positioned at the end.
+ */
+ IteratorsMap iteratorsMap_;
+
+ /** @brief Calculates the message sizes to send. */
+ void calculateMessageSizes();
+
+ /**
+ * @brief Pack and send the message for another process.
+ * @param destination The rank of the process we send to.
+ * @param buffer The allocated buffer to use.
+ * @param bufferSize The size of the buffer.
+ * @param req The MPI_Request to setup the nonblocking send.
+ */
+ void packAndSend(int destination, char* buffer, std::size_t bufferSize, MPI_Request& req);
+
+ /**
+ * @brief Recv and unpack the message from another process and add the indices.
+ * @param numberer Functor providing local indices for added global indices.
+ */
+ template<typename T1>
+ void recvAndUnpack(T1& numberer);
+
+ /**
+ * @brief Register the MPI datatype for the MessageInformation.
+ */
+ void registerMessageDatatype();
+
+ /**
+ * @brief Insert an entry into the remote index list if not yet present.
+ */
+ void insertIntoRemoteIndexList(int process,
+ const std::pair<GlobalIndex,Attribute>& global,
+ char attribute);
+
+ /**
+ * @brief Reset the iterator tuples of all neighbouring processes.
+ */
+ void resetIteratorsMap();
+
+ /**
+ * @brief Check whether the iterator tuples of all neighbouring processes
+ * are reset.
+ */
+ bool checkReset();
+
+ /**
+ * @brief Check whether the iterator tuple is reset.
+ *
+ * @param iterators The iterator tuple to check.
+ * @param rlist The SLList of the remote indices.
+ * @param gList The SLList of the global indices.
+ * @param bList The SLList of the bool values.
+ */
+ bool checkReset(const Iterators& iterators, RemoteIndexList& rlist, GlobalIndexList& gList,
+ BoolList& bList);
+ };
+
+ template<typename TG, typename TA>
+ bool operator<(const IndexPair<TG,ParallelLocalIndex<TA> >& i1,
+ const std::pair<TG,TA>& i2)
+ {
+ return i1.global() < i2.first ||
+ (i1.global() == i2.first && i1.local().attribute()<i2.second);
+ }
+
+ template<typename TG, typename TA>
+ bool operator<(const std::pair<TG,TA>& i1,
+ const IndexPair<TG,ParallelLocalIndex<TA> >& i2)
+ {
+ return i1.first < i2.global() ||
+ (i1.first == i2.global() && i1.second<i2.local().attribute());
+ }
+
+ template<typename TG, typename TA>
+ bool operator==(const IndexPair<TG,ParallelLocalIndex<TA> >& i1,
+ const std::pair<TG,TA>& i2)
+ {
+ return (i1.global() == i2.first && i1.local().attribute()==i2.second);
+ }
+
+ template<typename TG, typename TA>
+ bool operator!=(const IndexPair<TG,ParallelLocalIndex<TA> >& i1,
+ const std::pair<TG,TA>& i2)
+ {
+ return (i1.global() != i2.first || i1.local().attribute()!=i2.second);
+ }
+
+ template<typename TG, typename TA>
+ bool operator==(const std::pair<TG,TA>& i2,
+ const IndexPair<TG,ParallelLocalIndex<TA> >& i1)
+ {
+ return (i1.global() == i2.first && i1.local().attribute()==i2.second);
+ }
+
+ template<typename TG, typename TA>
+ bool operator!=(const std::pair<TG,TA>& i2,
+ const IndexPair<TG,ParallelLocalIndex<TA> >& i1)
+ {
+ return (i1.global() != i2.first || i1.local().attribute()!=i2.second);
+ }
+
+ /**
+ * @brief Stores the corresponding global indices of the remote index information.
+ *
+ * Whenever a ParallelIndexSet is resized, all RemoteIndices that use it are invalidated,
+ * as the pointers into the index set become invalid after the resize.
+ * One can rebuild them by storing the global indices in a map with this function and
+ * later repairing the pointers by calling repairLocalIndexPointers.
+ *
+ * @warning The RemoteIndices object has to be built with the same index set for both the
+ * sending and the receiving side.
+ * @param globalMap Map to store the corresponding global indices in.
+ * @param remoteIndices The remote index information whose corresponding global
+ * indices we need to store.
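+ *
+ * A minimal usage sketch (GlobalIndex, Attribute, remoteIndices and indexSet are
+ * placeholders for an existing, consistent parallel setup):
+ * \code
+ * std::map<int, Dune::SLList<std::pair<GlobalIndex,Attribute> > > globalMap;
+ * Dune::storeGlobalIndicesOfRemoteIndices(globalMap, remoteIndices);
+ * // ... resize the index set (beginResize / add / endResize) ...
+ * Dune::repairLocalIndexPointers(globalMap, remoteIndices, indexSet);
+ * \endcode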
+ */
+ template<typename T, typename A, typename A1>
+ void storeGlobalIndicesOfRemoteIndices(std::map<int,SLList<std::pair<typename T::GlobalIndex, typename T::LocalIndex::Attribute>,A> >& globalMap,
+ const RemoteIndices<T,A1>& remoteIndices)
+ {
+ for(auto remote = remoteIndices.begin(), end =remoteIndices.end(); remote != end; ++remote) {
+ typedef typename RemoteIndices<T,A1>::RemoteIndexList RemoteIndexList;
+ typedef SLList<std::pair<typename T::GlobalIndex, typename T::LocalIndex::Attribute>,A> GlobalIndexList;
+ GlobalIndexList& global = globalMap[remote->first];
+ RemoteIndexList& rList = *(remote->second.first);
+
+ for(auto index = rList.begin(), riEnd = rList.end();
+ index != riEnd; ++index) {
+ global.push_back(std::make_pair(index->localIndexPair().global(),
+ index->localIndexPair().local().attribute()));
+ }
+ }
+ }
+
+ /**
+ * @brief Repair the pointers to the local indices in the remote indices.
+ *
+ * @param globalMap The map of the process number to the list of global indices
+ * corresponding to the remote index list of the process.
+ * @param remoteIndices The known remote indices.
+ * @param indexSet The set of local indices of the current process.
+ */
+ template<typename T, typename A, typename A1>
+ inline void repairLocalIndexPointers(std::map<int,
+ SLList<std::pair<typename T::GlobalIndex,
+ typename T::LocalIndex::Attribute>,A> >& globalMap,
+ RemoteIndices<T,A1>& remoteIndices,
+ const T& indexSet)
+ {
+ assert(globalMap.size()==static_cast<std::size_t>(remoteIndices.neighbours()));
+ // Repair pointers to index set in remote indices.
+ auto global = globalMap.begin();
+ auto end = remoteIndices.remoteIndices_.end();
+
+ for(auto remote = remoteIndices.remoteIndices_.begin(); remote != end; ++remote, ++global) {
+ assert(remote->first==global->first);
+ assert(remote->second.first->size() == global->second.size());
+
+ auto riEnd = remote->second.first->end();
+ auto rIndex = remote->second.first->begin();
+ auto gIndex = global->second.begin();
+ auto index = indexSet.begin();
+
+ assert(rIndex==riEnd || gIndex != global->second.end());
+ while(rIndex != riEnd) {
+ // Search for the index in the set.
+ assert(gIndex != global->second.end());
+
+ while(!(index->global() == gIndex->first
+ && index->local().attribute() == gIndex->second)) {
+ ++index;
+ // this is only needed for ALU, where there may exist
+ // more entries with the same global index in the remote index set
+ // than in the index set
+ if (index->global() > gIndex->first) {
+ index=indexSet.begin();
+ }
+ }
+
+ assert(index != indexSet.end() && *index == *gIndex);
+
+ rIndex->localIndex_ = &(*index);
+ ++index;
+ ++rIndex;
+ ++gIndex;
+ }
+ }
+ remoteIndices.sourceSeqNo_ = remoteIndices.source_->seqNo();
+ remoteIndices.destSeqNo_ = remoteIndices.target_->seqNo();
+ }
+
+ template<typename T>
+ IndicesSyncer<T>::IndicesSyncer(ParallelIndexSet& indexSet,
+ RemoteIndices& remoteIndices)
+ : indexSet_(indexSet), remoteIndices_(remoteIndices)
+ {
+ // index sets must match.
+ assert(remoteIndices.source_ == remoteIndices.target_);
+ assert(remoteIndices.source_ == &indexSet);
+ MPI_Comm_rank(remoteIndices_.communicator(), &rank_);
+ }
+
+ template<typename T>
+ IndicesSyncer<T>::Iterators::Iterators(RemoteIndexList& remoteIndices,
+ GlobalIndexList& globalIndices,
+ BoolList& booleans)
+ : iterators_(remoteIndices.beginModify(), globalIndices.beginModify(),
+ booleans.beginModify(), remoteIndices.end())
+ { }
+
+ template<typename T>
+ IndicesSyncer<T>::Iterators::Iterators()
+ : iterators_()
+ {}
+
+ template<typename T>
+ inline typename IndicesSyncer<T>::Iterators& IndicesSyncer<T>::Iterators::operator++()
+ {
+ ++(std::get<0>(iterators_));
+ ++(std::get<1>(iterators_));
+ ++(std::get<2>(iterators_));
+ return *this;
+ }
+
+ template<typename T>
+ inline void IndicesSyncer<T>::Iterators::insert(const RemoteIndex & index,
+ const std::pair<GlobalIndex,Attribute>& global)
+ {
+ std::get<0>(iterators_).insert(index);
+ std::get<1>(iterators_).insert(global);
+ std::get<2>(iterators_).insert(false);
+ }
+
+ template<typename T>
+ inline typename IndicesSyncer<T>::RemoteIndex&
+ IndicesSyncer<T>::Iterators::remoteIndex() const
+ {
+ return *(std::get<0>(iterators_));
+ }
+
+ template<typename T>
+ inline std::pair<typename IndicesSyncer<T>::GlobalIndex,typename IndicesSyncer<T>::Attribute>&
+ IndicesSyncer<T>::Iterators::globalIndexPair() const
+ {
+ return *(std::get<1>(iterators_));
+ }
+
+ template<typename T>
+ inline bool IndicesSyncer<T>::Iterators::isOld() const
+ {
+ return *(std::get<2>(iterators_));
+ }
+
+ template<typename T>
+ inline void IndicesSyncer<T>::Iterators::reset(RemoteIndexList& remoteIndices,
+ GlobalIndexList& globalIndices,
+ BoolList& booleans)
+ {
+ std::get<0>(iterators_) = remoteIndices.beginModify();
+ std::get<1>(iterators_) = globalIndices.beginModify();
+ std::get<2>(iterators_) = booleans.beginModify();
+ }
+
+ template<typename T>
+ inline bool IndicesSyncer<T>::Iterators::isNotAtEnd() const
+ {
+ return std::get<0>(iterators_) != std::get<3>(iterators_);
+ }
+
+ template<typename T>
+ inline bool IndicesSyncer<T>::Iterators::isAtEnd() const
+ {
+ return std::get<0>(iterators_) == std::get<3>(iterators_);
+ }
+
+ template<typename T>
+ void IndicesSyncer<T>::registerMessageDatatype()
+ {
+ MPI_Datatype type[2] = {MPI_INT, MPI_INT};
+ int blocklength[2] = {1,1};
+ MPI_Aint displacement[2];
+ MPI_Aint base;
+
+ // Compute displacement
+ MessageInformation message;
+
+ MPI_Get_address( &(message.publish), displacement);
+ MPI_Get_address( &(message.pairs), displacement+1);
+
+ // Make the displacement relative
+ MPI_Get_address(&message, &base);
+ displacement[0] -= base;
+ displacement[1] -= base;
+
+ MPI_Type_create_struct( 2, blocklength, displacement, type, &datatype_);
+ MPI_Type_commit(&datatype_);
+ }
+
+ template<typename T>
+ void IndicesSyncer<T>::calculateMessageSizes()
+ {
+ auto iEnd = indexSet_.end();
+ auto collIter = remoteIndices_.template iterator<true>();
+
+ for(auto index = indexSet_.begin(); index != iEnd; ++index) {
+ collIter.advance(index->global(), index->local().attribute());
+ if(collIter.empty())
+ break;
+ int knownRemote=0;
+ auto end = collIter.end();
+
+ // Count the remote indices we know.
+ for(auto valid = collIter.begin(); valid != end; ++valid) {
+ ++knownRemote;
+ }
+
+ if(knownRemote>0) {
+ Dune::dverb<<rank_<<": publishing "<<knownRemote<<" for index "<<index->global()<< " for processes ";
+
+ // Update MessageInformation
+ for(auto valid = collIter.begin(); valid != end; ++valid) {
+ ++(infoSend_[valid.process()].publish);
+ (infoSend_[valid.process()].pairs) += knownRemote;
+ Dune::dverb<<valid.process()<<" ";
+ Dune::dverb<<"(publish="<<infoSend_[valid.process()].publish<<", pairs="<<infoSend_[valid.process()].pairs
+ <<") ";
+ }
+ Dune::dverb<<std::endl;
+ }
+ }
+
+ const auto end = infoSend_.end();
+
+ // Now determine the buffersizes needed for each neighbour using MPI_Pack_size
+ MessageInformation dummy;
+
+ auto messageIter= infoSend_.begin();
+ const auto rend = remoteIndices_.end();
+ int neighbour=0;
+
+ for(auto remote = remoteIndices_.begin(); remote != rend; ++remote, ++neighbour) {
+ MessageInformation* message;
+ MessageInformation recv;
+
+ if(messageIter != end && messageIter->first==remote->first) {
+ // We want to send message information to that process
+ message = const_cast<MessageInformation*>(&(messageIter->second));
+ ++messageIter;
+ }else
+ // We do not want to send information but the other process might.
+ message = &dummy;
+
+ sendBufferSizes_[neighbour]=0;
+ int tsize;
+ // The number of indices published
+ MPI_Pack_size(1, MPI_INT,remoteIndices_.communicator(), &tsize);
+ sendBufferSizes_[neighbour] += tsize;
+
+ for(int i=0; i < message->publish; ++i) {
+ // The global index
+ MPI_Pack_size(1, MPITraits<GlobalIndex>::getType(), remoteIndices_.communicator(), &tsize);
+ sendBufferSizes_[neighbour] += tsize;
+ // The attribute in the local index
+ MPI_Pack_size(1, MPI_CHAR, remoteIndices_.communicator(), &tsize);
+ sendBufferSizes_[neighbour] += tsize;
+ // The number of corresponding remote indices
+ MPI_Pack_size(1, MPI_INT, remoteIndices_.communicator(), &tsize);
+ sendBufferSizes_[neighbour] += tsize;
+ }
+ for(int i=0; i < message->pairs; ++i) {
+ // The process of the remote index
+ MPI_Pack_size(1, MPI_INT, remoteIndices_.communicator(), &tsize);
+ sendBufferSizes_[neighbour] += tsize;
+ // The attribute of the remote index
+ MPI_Pack_size(1, MPI_CHAR, remoteIndices_.communicator(), &tsize);
+ sendBufferSizes_[neighbour] += tsize;
+ }
+
+ Dune::dverb<<rank_<<": Buffer (neighbour="<<remote->first<<") size is "<< sendBufferSizes_[neighbour]<<" for publish="<<message->publish<<" pairs="<<message->pairs<<std::endl;
+ }
+
+ }
+
+ template<typename T>
+ inline void IndicesSyncer<T>::sync()
+ {
+ DefaultNumberer numberer;
+ sync(numberer);
+ }
+
+ template<typename T>
+ template<typename T1>
+ void IndicesSyncer<T>::sync(T1& numberer)
+ {
+ // The pointers to the local indices in the remote indices
+ // will become invalid due to the resorting of the index set.
+ // Therefore store the corresponding global indices.
+ // Mark all indices as not added
+ const auto end = remoteIndices_.end();
+
+ // Number of neighbours might change during the syncing.
+ // save the old neighbours
+ std::size_t noOldNeighbours = remoteIndices_.neighbours();
+ int* oldNeighbours = new int[noOldNeighbours];
+ sendBufferSizes_ = new std::size_t[noOldNeighbours];
+ std::size_t neighbourI = 0;
+
+ for(auto remote = remoteIndices_.begin(); remote != end; ++remote, ++neighbourI) {
+ oldNeighbours[neighbourI] = remote->first;
+
+ // Make sure we only have one remote index list.
+ assert(remote->second.first==remote->second.second);
+
+ RemoteIndexList& rList = *(remote->second.first);
+
+ // Store the corresponding global indices.
+ GlobalIndexList& global = globalMap_[remote->first];
+ BoolList& added = oldMap_[remote->first];
+ auto riEnd = rList.end();
+
+ for(auto index = rList.begin();
+ index != riEnd; ++index) {
+ global.push_back(std::make_pair(index->localIndexPair().global(),
+ index->localIndexPair().local().attribute()));
+ added.push_back(true);
+ }
+
+ Iterators iterators(rList, global, added);
+ iteratorsMap_.insert(std::make_pair(remote->first, iterators));
+ assert(checkReset(iteratorsMap_[remote->first], rList,global,added));
+ }
+
+ // Exchange indices with each neighbour
+ calculateMessageSizes();
+
+ // Allocate the buffers
+ receiveBufferSize_=1;
+ sendBuffers_ = new char*[noOldNeighbours];
+
+ for(std::size_t i=0; i<noOldNeighbours; ++i) {
+ sendBuffers_[i] = new char[sendBufferSizes_[i]];
+ receiveBufferSize_ = std::max(receiveBufferSize_, static_cast<int>(sendBufferSizes_[i]));
+ }
+
+ receiveBuffer_=new char[receiveBufferSize_];
+
+ indexSet_.beginResize();
+
+ Dune::dverb<<rank_<<": Neighbours: ";
+
+ for(std::size_t i = 0; i<noOldNeighbours; ++i)
+ Dune::dverb<<oldNeighbours[i]<<" ";
+
+ Dune::dverb<<std::endl;
+
+ MPI_Request* requests = new MPI_Request[noOldNeighbours];
+ MPI_Status* statuses = new MPI_Status[noOldNeighbours];
+
+ // Pack Message data and start the sends
+ for(std::size_t i = 0; i<noOldNeighbours; ++i)
+ packAndSend(oldNeighbours[i], sendBuffers_[i], sendBufferSizes_[i], requests[i]);
+
+ // Probe for incoming messages, receive and unpack them
+ for(std::size_t i = 0; i<noOldNeighbours; ++i)
+ recvAndUnpack(numberer);
+
+ delete[] receiveBuffer_;
+
+ // Wait for the completion of the sends
+ if(MPI_SUCCESS!=MPI_Waitall(noOldNeighbours, requests, statuses)) {
+ std::cerr<<": MPI_Error occurred while sending message"<<std::endl;
+ for(std::size_t i=0; i< noOldNeighbours; i++)
+ if(MPI_SUCCESS!=statuses[i].MPI_ERROR)
+ std::cerr<<"Destination "<<statuses[i].MPI_SOURCE<<" error code: "<<statuses[i].MPI_ERROR<<std::endl;
+ }
+
+ delete[] statuses;
+ delete[] requests;
+
+ for(std::size_t i=0; i<noOldNeighbours; ++i)
+ delete[] sendBuffers_[i];
+
+ delete[] sendBuffers_;
+ delete[] sendBufferSizes_;
+
+ // No need for the iterator tuples any more
+ iteratorsMap_.clear();
+
+ indexSet_.endResize();
+
+ delete[] oldNeighbours;
+
+ repairLocalIndexPointers(globalMap_, remoteIndices_, indexSet_);
+
+ oldMap_.clear();
+ globalMap_.clear();
+
+ // update the sequence number
+ remoteIndices_.sourceSeqNo_ = remoteIndices_.destSeqNo_ = indexSet_.seqNo();
+ }
+
+ template<typename T>
+ void IndicesSyncer<T>::packAndSend(int destination, char* buffer, std::size_t bufferSize, MPI_Request& request)
+ {
+ auto iEnd = indexSet_.end();
+ int bpos = 0;
+ int published = 0;
+ int pairs = 0;
+
+ assert(checkReset());
+
+ // Pack the number of indices we publish
+ MPI_Pack(&(infoSend_[destination].publish), 1, MPI_INT, buffer, bufferSize, &bpos,
+ remoteIndices_.communicator());
+
+ for(auto index = indexSet_.begin(); index != iEnd; ++index) {
+ // Search for corresponding remote indices in all iterator tuples
+ auto iteratorsEnd = iteratorsMap_.end();
+
+ // advance all iterators to a position with global index >= index->global()
+ for(auto iterators = iteratorsMap_.begin(); iteratorsEnd != iterators; ++iterators) {
+ while(iterators->second.isNotAtEnd() &&
+ iterators->second.globalIndexPair().first < index->global())
+ ++(iterators->second);
+ assert(!iterators->second.isNotAtEnd() || iterators->second.globalIndexPair().first >= index->global());
+ }
+
+ // Add all remote indices positioned at global which were already present before calling sync
+ // to the message.
+ // Count how many remote indices we will send
+ int indices = 0;
+ bool knownRemote = false; // Is the remote process supposed to know this index?
+
+ for(auto iterators = iteratorsMap_.begin(); iteratorsEnd != iterators; ++iterators)
+ {
+ std::pair<GlobalIndex,Attribute> p;
+ if (iterators->second.isNotAtEnd())
+ {
+ p = iterators->second.globalIndexPair();
+ }
+
+ if(iterators->second.isNotAtEnd() && iterators->second.isOld()
+ && iterators->second.globalIndexPair().first == index->global()) {
+ indices++;
+ if(destination == iterators->first)
+ knownRemote = true;
+ }
+ }
+
+ if(!knownRemote)
+ // We do not need to send any indices
+ continue;
+
+ Dune::dverb<<rank_<<": sending "<<indices<<" for index "<<index->global()<<" to "<<destination<<std::endl;
+
+
+ // Pack the global index, the attribute and the number
+ MPI_Pack(const_cast<GlobalIndex*>(&(index->global())), 1, MPITraits<GlobalIndex>::getType(), buffer, bufferSize, &bpos,
+ remoteIndices_.communicator());
+
+ char attr = index->local().attribute();
+ MPI_Pack(&attr, 1, MPI_CHAR, buffer, bufferSize, &bpos,
+ remoteIndices_.communicator());
+
+ // Pack the number of remote indices we send.
+ MPI_Pack(&indices, 1, MPI_INT, buffer, bufferSize, &bpos,
+ remoteIndices_.communicator());
+
+ // Pack the information about the remote indices
+ for(auto iterators = iteratorsMap_.begin(); iteratorsEnd != iterators; ++iterators)
+ if(iterators->second.isNotAtEnd() && iterators->second.isOld()
+ && iterators->second.globalIndexPair().first == index->global()) {
+ int process = iterators->first;
+
+ ++pairs;
+ assert(pairs <= infoSend_[destination].pairs);
+ MPI_Pack(&process, 1, MPI_INT, buffer, bufferSize, &bpos,
+ remoteIndices_.communicator());
+ char attr2 = iterators->second.remoteIndex().attribute();
+
+ MPI_Pack(&attr2, 1, MPI_CHAR, buffer, bufferSize, &bpos,
+ remoteIndices_.communicator());
+ --indices;
+ }
+ assert(indices==0);
+ ++published;
+ Dune::dvverb<<" (publish="<<published<<", pairs="<<pairs<<")"<<std::endl;
+ assert(published <= infoSend_[destination].publish);
+ }
+
+ // Make sure we send all expected entries
+ assert(published == infoSend_[destination].publish);
+ assert(pairs == infoSend_[destination].pairs);
+ resetIteratorsMap();
+
+ Dune::dverb << rank_<<": Sending message of "<<bpos<<" bytes to "<<destination<<std::endl;
+
+ MPI_Issend(buffer, bpos, MPI_PACKED, destination, 345, remoteIndices_.communicator(),&request);
+ }
+
+ template<typename T>
+ inline void IndicesSyncer<T>::insertIntoRemoteIndexList(int process,
+ const std::pair<GlobalIndex,Attribute>& globalPair,
+ char attribute)
+ {
+ Dune::dverb<<"Inserting from "<<process<<" "<<globalPair.first<<", "<<
+ globalPair.second<<" "<<attribute<<std::endl;
+
+ resetIteratorsMap();
+
+ // There might be cases where there are no remote indices for that process yet
+ typename IteratorsMap::iterator found = iteratorsMap_.find(process);
+
+ if( found == iteratorsMap_.end() ) {
+ Dune::dverb<<"Discovered new neighbour "<<process<<std::endl;
+ RemoteIndexList* rlist = new RemoteIndexList();
+ remoteIndices_.remoteIndices_.insert(std::make_pair(process,std::make_pair(rlist,rlist)));
+ Iterators iterators = Iterators(*rlist, globalMap_[process], oldMap_[process]);
+ found = iteratorsMap_.insert(std::make_pair(process, iterators)).first;
+ }
+
+ Iterators& iterators = found->second;
+
+ // Search for the remote index
+ while(iterators.isNotAtEnd() && iterators.globalIndexPair() < globalPair) {
+ // Increment all iterators
+ ++iterators;
+
+ }
+
+ if(iterators.isAtEnd() || iterators.globalIndexPair() != globalPair) {
+ // The entry is not yet known
+ // Insert in the list and do not change the first iterator.
+ iterators.insert(RemoteIndex(Attribute(attribute)),globalPair);
+ return;
+ }
+
+ // Global indices match
+ bool indexIsThere=false;
+ for(Iterators tmpIterators = iterators;
+ !tmpIterators.isAtEnd() && tmpIterators.globalIndexPair() == globalPair;
+ ++tmpIterators)
+ //entry already exists with the same attribute
+ if(tmpIterators.globalIndexPair().second == attribute) {
+ indexIsThere=true;
+ break;
+ }
+
+ if(!indexIsThere)
+ // The entry is not yet known
+ // Insert in the list and do not change the first iterator.
+ iterators.insert(RemoteIndex(Attribute(attribute)),globalPair);
+ }
+
+ template<typename T>
+ template<typename T1>
+ void IndicesSyncer<T>::recvAndUnpack(T1& numberer)
+ {
+ const ParallelIndexSet& constIndexSet = indexSet_;
+ auto iEnd = constIndexSet.end();
+ auto index = constIndexSet.begin();
+ int bpos = 0;
+ int publish;
+
+ assert(checkReset());
+
+ MPI_Status status;
+
+ // We have to determine the message size and source before the receive
+ MPI_Probe(MPI_ANY_SOURCE, 345, remoteIndices_.communicator(), &status);
+
+ int source=status.MPI_SOURCE;
+ int count;
+ MPI_Get_count(&status, MPI_PACKED, &count);
+
+ Dune::dvverb<<rank_<<": Receiving message from "<< source<<" with "<<count<<" bytes"<<std::endl;
+
+ if(count>receiveBufferSize_) {
+ receiveBufferSize_=count;
+ delete[] receiveBuffer_;
+ receiveBuffer_ = new char[receiveBufferSize_];
+ }
+
+ MPI_Recv(receiveBuffer_, count, MPI_PACKED, source, 345, remoteIndices_.communicator(), &status);
+
+ // How many global entries were published?
+ MPI_Unpack(receiveBuffer_, count, &bpos, &publish, 1, MPI_INT, remoteIndices_.communicator());
+
+ // Now unpack the remote indices and add them.
+ while(publish>0) {
+
+ // Unpack information about the local index on the source process
+ GlobalIndex global; // global index of the current entry
+ char sourceAttribute; // Attribute on the source process
+ int pairs;
+
+ MPI_Unpack(receiveBuffer_, count, &bpos, &global, 1, MPITraits<GlobalIndex>::getType(),
+ remoteIndices_.communicator());
+ MPI_Unpack(receiveBuffer_, count, &bpos, &sourceAttribute, 1, MPI_CHAR,
+ remoteIndices_.communicator());
+ MPI_Unpack(receiveBuffer_, count, &bpos, &pairs, 1, MPI_INT,
+ remoteIndices_.communicator());
+
+ // Insert the entry on the remote process to our
+ // remote index list
+ SLList<std::pair<int,Attribute> > sourceAttributeList;
+ sourceAttributeList.push_back(std::make_pair(source,Attribute(sourceAttribute)));
+#ifndef NDEBUG
+ bool foundSelf = false;
+#endif
+ Attribute myAttribute=Attribute();
+
+ // Unpack the remote indices
+ for(; pairs>0; --pairs) {
+ // Unpack the process id that knows the index
+ int process;
+ char attribute;
+ MPI_Unpack(receiveBuffer_, count, &bpos, &process, 1, MPI_INT,
+ remoteIndices_.communicator());
+ // Unpack the attribute
+ MPI_Unpack(receiveBuffer_, count, &bpos, &attribute, 1, MPI_CHAR,
+ remoteIndices_.communicator());
+
+ if(process==rank_) {
+#ifndef NDEBUG
+ foundSelf=true;
+#endif
+ myAttribute=Attribute(attribute);
+ // Now we know the local attribute of the global index
+ //Only add the index if it is unknown.
+ // Do we know that global index already?
+ auto pos = std::lower_bound(index, iEnd, IndexPair(global));
+
+ if(pos == iEnd || pos->global() != global) {
+ // no entry with this global index
+ indexSet_.add(global,
+ ParallelLocalIndex<Attribute>(numberer(global),
+ myAttribute, true));
+ Dune::dvverb << "Adding "<<global<<" "<<myAttribute<<std::endl;
+ continue;
+ }
+
+ // because of above the global indices match. Add only if the attribute is different
+ bool indexIsThere = false;
+ index=pos;
+
+ for(; pos->global()==global; ++pos)
+ if(pos->local().attribute() == myAttribute) {
+ Dune::dvverb<<"found "<<global<<" "<<myAttribute<<std::endl;
+ indexIsThere = true;
+ break;
+ }
+
+ if(!indexIsThere) {
+ indexSet_.add(global,
+ ParallelLocalIndex<Attribute>(numberer(global),
+ myAttribute, true));
+ Dune::dvverb << "Adding "<<global<<" "<<myAttribute<<std::endl;
+ }
+
+ }else{
+ sourceAttributeList.push_back(std::make_pair(process,Attribute(attribute)));
+ }
+ }
+ assert(foundSelf);
+ // Insert remote indices
+ typedef typename SLList<std::pair<int,Attribute> >::const_iterator Iter;
+ for(Iter i=sourceAttributeList.begin(), end=sourceAttributeList.end();
+ i!=end; ++i)
+ insertIntoRemoteIndexList(i->first, std::make_pair(global, myAttribute),
+ i->second);
+ --publish;
+ }
+
+ resetIteratorsMap();
+ }
+
+ template<typename T>
+ void IndicesSyncer<T>::resetIteratorsMap(){
+
+ // Reset iterators in all tuples.
+ const auto remoteEnd = remoteIndices_.remoteIndices_.end();
+ auto iterators = iteratorsMap_.begin();
+ auto global = globalMap_.begin();
+ auto added = oldMap_.begin();
+
+ for(auto remote = remoteIndices_.remoteIndices_.begin();
+ remote != remoteEnd; ++remote, ++global, ++added, ++iterators) {
+ iterators->second.reset(*(remote->second.first), global->second, added->second);
+ }
+ }
+
+ template<typename T>
+ bool IndicesSyncer<T>::checkReset(const Iterators& iterators, RemoteIndexList& rList, GlobalIndexList& gList,
+ BoolList& bList){
+
+ if(std::get<0>(iterators.iterators_) != rList.begin())
+ return false;
+ if(std::get<1>(iterators.iterators_) != gList.begin())
+ return false;
+ if(std::get<2>(iterators.iterators_) != bList.begin())
+ return false;
+ return true;
+ }
+
+
+ template<typename T>
+ bool IndicesSyncer<T>::checkReset(){
+
+ // Reset iterators in all tuples.
+ const auto remoteEnd = remoteIndices_.remoteIndices_.end();
+ auto iterators = iteratorsMap_.begin();
+ auto global = globalMap_.begin();
+ auto added = oldMap_.begin();
+ bool ret = true;
+
+ for(auto remote = remoteIndices_.remoteIndices_.begin();
+ remote != remoteEnd; ++remote, ++global, ++added, ++iterators) {
+ if(!checkReset(iterators->second, *(remote->second.first), global->second,
+ added->second))
+ ret=false;
+ }
+ return ret;
+ }
+}
+
+#endif
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_INTERFACE_HH
+#define DUNE_INTERFACE_HH
+
+#if HAVE_MPI
+
+#include "remoteindices.hh"
+#include <dune/common/enumset.hh>
+
+namespace Dune
+{
+ /** @addtogroup Common_Parallel
+ *
+ * @{
+ */
+ /**
+ * @file
+ * @brief Provides classes for building the communication
+ * interface between remote indices.
+ * @author Markus Blatt
+ */
+
+ /** @} */
+ /**
+ * @brief Base class of all classes representing a communication
+ * interface.
+ *
+ * It provides a generic utility method for building the interface
+ * for a set of remote indices.
+ */
+ class InterfaceBuilder
+ {
+ public:
+ class RemoteIndicesStateError : public InvalidStateException
+ {};
+
+ virtual ~InterfaceBuilder()
+ {}
+
+ protected:
+ /**
+ * @brief Not for public use.
+ */
+ InterfaceBuilder()
+ {}
+
+ /**
+ * @brief Builds the interface between remote processes.
+ *
+ *
+ * The types T1 and T2 are classes representing a set of
+ * enumeration values of type InterfaceBuilder::Attribute. They have to provide
+ * a (static) method
+ * \code
+ * bool contains(Attribute flag) const;
+ * \endcode
+ * for checking whether the set contains a specific flag.
+ * This functionality is for example provided the classes
+ * EnumItem, EnumRange and Combine.
+ *
+ * If the template parameter send is true, the sending side of
+ * the interface (i.e. the interface used for sending in a forward
+ * communication) will be built; otherwise the information for
+ * receiving will be built.
+ *
+ * @param remoteIndices The indices known to remote processes.
+ * @param sourceFlags The set of flags marking source indices.
+ * @param destFlags The set of flags marking destination indices.
+ * @param functor A functor for callbacks. It should provide the
+ * following methods:
+ * \code
+ * // Reserve memory for the interface to processor proc. The interface
+ * // has to hold size entries
+ * void reserve(int proc, int size);
+ *
+ * // Add an entry to the interface
+ * // We will send to/receive from process proc the entry at index local
+ * void add(int proc, int local);
+ * \endcode
+ */
+ template<class R, class T1, class T2, class Op, bool send>
+ void buildInterface (const R& remoteIndices,
+ const T1& sourceFlags, const T2& destFlags,
+ Op& functor) const;
+ };
+
+ /**
+ * @brief Information describing an interface.
+ *
+ * This class is used for temporarily gathering information
+ * about the interface needed for actually building it. It
+ * is used by the class Interface as a functor for InterfaceBuilder::build.
+ */
+ class InterfaceInformation
+ {
+
+ public:
+
+ /**
+ * @brief Get the number of entries in the interface.
+ */
+ size_t size() const
+ {
+ return size_;
+ }
+ /**
+ * @brief Get the local index for an entry.
+ * @param i The index of the entry.
+ */
+ std::size_t& operator[](size_t i)
+ {
+ assert(i<size_);
+ return indices_[i];
+ }
+ /**
+ * @brief Get the local index for an entry.
+ * @param i The index of the entry.
+ */
+ std::size_t operator[](size_t i) const
+ {
+ assert(i<size_);
+ return indices_[i];
+ }
+ /**
+ * @brief Reserve space for a number of entries.
+ * @param size The maximum number of entries to hold.
+ */
+ void reserve(size_t size)
+ {
+ indices_ = new std::size_t[size];
+ maxSize_ = size;
+
+ }
+ /**
+ * @brief Frees allocated memory.
+ */
+ void free()
+ {
+ if(indices_)
+ delete[] indices_;
+ maxSize_ = 0;
+ size_=0;
+ indices_=0;
+ }
+ /**
+ * @brief Add a new index to the interface.
+ */
+ void add(std::size_t index)
+ {
+ assert(size_<maxSize_);
+ indices_[size_++]=index;
+ }
+
+ InterfaceInformation()
+ : size_(0), maxSize_(0), indices_(0)
+ {}
+
+ virtual ~InterfaceInformation()
+ {}
+
+ bool operator!=(const InterfaceInformation& o) const
+ {
+ return !operator==(o);
+ }
+
+ bool operator==(const InterfaceInformation& o) const
+ {
+ if(size_!=o.size_)
+ return false;
+ for(std::size_t i=0; i< size_; ++i)
+ if(indices_[i]!=o.indices_[i])
+ return false;
+ return true;
+ }
+
+ private:
+ /**
+ * @brief The number of entries in the interface.
+ */
+ size_t size_;
+ /**
+ * @brief The maximum number of indices we can hold.
+ */
+ size_t maxSize_;
+ /**
+ * @brief The local indices of the interface.
+ */
+ std::size_t* indices_;
+ };
+
+ /** @addtogroup Common_Parallel
+ *
+ * @{
+ */
+
+ /**
+ * @brief Communication interface between remote and local indices.
+ *
+ * Describes the communication interface between
+ * indices on the local process and those on remote processes.
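+ *
+ * A minimal usage sketch (the attribute enum `Flag` with values `owner` and `ghost`
+ * and the object `remoteIndices` are placeholders for an existing, consistent setup):
+ * \code
+ * Dune::Interface interface(remoteIndices.communicator());
+ * // send from owner indices, receive at ghost indices
+ * interface.build(remoteIndices,
+ *                 Dune::EnumItem<Flag,owner>(),
+ *                 Dune::EnumItem<Flag,ghost>());
+ * // ... use the interface, e.g. to set up a communicator ...
+ * interface.free();
+ * \endcode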
+ */
+ class Interface : public InterfaceBuilder
+ {
+
+ public:
+ /**
+ * @brief The type of the map from process number to InterfaceInformation for
+ * sending and receiving to and from it.
+ */
+ typedef std::map<int,std::pair<InterfaceInformation,InterfaceInformation> > InformationMap;
+
+ /**
+ * @brief Builds the interface.
+ *
+ * The types T1 and T2 are classes representing a set of
+ * enumeration values of type Interface::Attribute. They have to provide
+ * a (static) method
+ * \code
+ * bool contains(Attribute flag) const;
+ * \endcode
+ * for checking whether the set contains a specific flag.
+ * This functionality is, for example, provided by the classes
+ * EnumItem, EnumRange and Combine.
+ * @param remoteIndices The indices known to remote processes.
+ * @param sourceFlags The set of flags marking indices we send from.
+ * @param destFlags The set of flags marking indices we receive for.
+ */
+ template<typename R, typename T1, typename T2>
+ void build(const R& remoteIndices, const T1& sourceFlags,
+ const T2& destFlags);
+
+ /**
+ * @brief Frees memory allocated during the build.
+ */
+ void free();
+
+ /**
+ * @brief Get the MPI Communicator.
+ */
+ MPI_Comm communicator() const;
+
+ /**
+ * @brief Get information about the interfaces.
+ *
+ * @return Map of the interfaces.
+ * The key of the map is the process number and the value
+ * is the information pair (first the send and then the receive
+ * information).
+ */
+ const InformationMap& interfaces() const;
+
+ Interface(MPI_Comm comm)
+ : communicator_(comm), interfaces_()
+ {}
+
+ Interface()
+ : communicator_(MPI_COMM_NULL), interfaces_()
+ {}
+
+ /**
+ * @brief Print the interface to std::cout for debugging.
+ */
+ void print() const;
+
+ bool operator!=(const Interface& o) const
+ {
+ return ! operator==(o);
+ }
+
+ bool operator==(const Interface& o) const
+ {
+ if(communicator_!=o.communicator_)
+ return false;
+ if(interfaces_.size()!=o.interfaces_.size())
+ return false;
+ typedef InformationMap::const_iterator MIter;
+
+ for(MIter m=interfaces_.begin(), om=o.interfaces_.begin();
+ m!=interfaces_.end(); ++m, ++om)
+ {
+ if(om->first!=m->first)
+ return false;
+ if(om->second.first!=m->second.first)
+ return false;
+ if(om->second.second!=m->second.second)
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * @brief Destructor.
+ */
+ virtual ~Interface();
+
+ void strip();
+ protected:
+
+ /**
+ * @brief Get information about the interfaces.
+ *
+ * @return Map of the interfaces.
+ * The key of the map is the process number and the value
+ * is the information pair (first the send and then the receive
+ * information).
+ */
+ InformationMap& interfaces();
+
+ /** @brief The MPI communicator we use. */
+ MPI_Comm communicator_;
+
+ private:
+ /**
+ * @brief Information about the interfaces.
+ *
+ * The key of the map is the process number and the value
+ * is the information pair (first the send and then the receive
+ * information).
+ */
+ InformationMap interfaces_;
+
+ template<bool send>
+ class InformationBuilder
+ {
+ public:
+ InformationBuilder(InformationMap& interfaces)
+ : interfaces_(interfaces)
+ {}
+
+ void reserve(int proc, int size)
+ {
+ if(send)
+ interfaces_[proc].first.reserve(size);
+ else
+ interfaces_[proc].second.reserve(size);
+ }
+ void add(int proc, std::size_t local)
+ {
+ if(send) {
+ interfaces_[proc].first.add(local);
+ }else{
+ interfaces_[proc].second.add(local);
+ }
+ }
+
+ private:
+ InformationMap& interfaces_;
+ };
+ };
+
+ template<class R, class T1, class T2, class Op, bool send>
+ void InterfaceBuilder::buildInterface(const R& remoteIndices, const T1& sourceFlags, const T2& destFlags, Op& interfaceInformation) const
+ {
+
+ if(!remoteIndices.isSynced())
+ DUNE_THROW(RemoteIndicesStateError,"RemoteIndices is not in sync with the index set. Call RemoteIndices::rebuild first!");
+ // Allocate the memory for the data type construction.
+ typedef R RemoteIndices;
+ typedef typename RemoteIndices::RemoteIndexMap::const_iterator const_iterator;
+
+ const const_iterator end=remoteIndices.end();
+
+ int rank;
+
+ MPI_Comm_rank(remoteIndices.communicator(), &rank);
+
+ // Allocate memory for the type construction.
+ for(const_iterator process=remoteIndices.begin(); process != end; ++process) {
+ // Measure the number of indices sent to the remote process first
+ int size=0;
+ typedef typename RemoteIndices::RemoteIndexList::const_iterator RemoteIterator;
+ const RemoteIterator remoteEnd = send ? process->second.first->end() :
+ process->second.second->end();
+ RemoteIterator remote = send ? process->second.first->begin() : process->second.second->begin();
+
+ while(remote!=remoteEnd) {
+ if( send ? destFlags.contains(remote->attribute()) :
+ sourceFlags.contains(remote->attribute())) {
+
+ // do we send the index?
+ if( send ? sourceFlags.contains(remote->localIndexPair().local().attribute()) :
+ destFlags.contains(remote->localIndexPair().local().attribute()))
+ ++size;
+ }
+ ++remote;
+ }
+ interfaceInformation.reserve(process->first, size);
+ }
+
+ // compare the local and remote indices and set up the types
+
+ for(const_iterator process=remoteIndices.begin(); process != end; ++process) {
+ typedef typename RemoteIndices::RemoteIndexList::const_iterator RemoteIterator;
+ const RemoteIterator remoteEnd = send ? process->second.first->end() :
+ process->second.second->end();
+ RemoteIterator remote = send ? process->second.first->begin() : process->second.second->begin();
+
+ while(remote!=remoteEnd) {
+ if( send ? destFlags.contains(remote->attribute()) :
+ sourceFlags.contains(remote->attribute())) {
+ // do we send the index?
+ if( send ? sourceFlags.contains(remote->localIndexPair().local().attribute()) :
+ destFlags.contains(remote->localIndexPair().local().attribute()))
+ interfaceInformation.add(process->first,remote->localIndexPair().local().local());
+ }
+ ++remote;
+ }
+ }
+ }
+
+ inline MPI_Comm Interface::communicator() const
+ {
+ return communicator_;
+
+ }
+
+
+ inline const std::map<int,std::pair<InterfaceInformation,InterfaceInformation> >& Interface::interfaces() const
+ {
+ return interfaces_;
+ }
+
+ inline std::map<int,std::pair<InterfaceInformation,InterfaceInformation> >& Interface::interfaces()
+ {
+ return interfaces_;
+ }
+
+ inline void Interface::print() const
+ {
+ typedef InformationMap::const_iterator const_iterator;
+ const const_iterator end=interfaces_.end();
+ int rank;
+ MPI_Comm_rank(communicator(), &rank);
+
+ for(const_iterator infoPair=interfaces_.begin(); infoPair!=end; ++infoPair) {
+ {
+ std::cout<<rank<<": send for process "<<infoPair->first<<": ";
+ const InterfaceInformation& info(infoPair->second.first);
+ for(size_t i=0; i < info.size(); i++)
+ std::cout<<info[i]<<" ";
+ std::cout<<std::endl;
+ } {
+
+ std::cout<<rank<<": receive for process "<<infoPair->first<<": ";
+ const InterfaceInformation& info(infoPair->second.second);
+ for(size_t i=0; i < info.size(); i++)
+ std::cout<<info[i]<<" ";
+ std::cout<<std::endl;
+ }
+
+ }
+ }
+
+ template<typename R, typename T1, typename T2>
+ inline void Interface::build(const R& remoteIndices, const T1& sourceFlags,
+ const T2& destFlags)
+ {
+ communicator_=remoteIndices.communicator();
+
+ assert(interfaces_.empty());
+
+ // Build the send interface
+ InformationBuilder<true> sendInformation(interfaces_);
+ this->template buildInterface<R,T1,T2,InformationBuilder<true>,true>(remoteIndices, sourceFlags,
+ destFlags, sendInformation);
+
+ // Build the receive interface
+ InformationBuilder<false> recvInformation(interfaces_);
+ this->template buildInterface<R,T1,T2,InformationBuilder<false>,false>(remoteIndices,sourceFlags,
+ destFlags, recvInformation);
+ strip();
+ }
+ inline void Interface::strip()
+ {
+ typedef InformationMap::iterator const_iterator;
+ for(const_iterator interfacePair = interfaces_.begin(); interfacePair != interfaces_.end();)
+ if(interfacePair->second.first.size()==0 && interfacePair->second.second.size()==0) {
+ interfacePair->second.first.free();
+ interfacePair->second.second.free();
+ const_iterator toerase=interfacePair++;
+ interfaces_.erase(toerase);
+ }else
+ ++interfacePair;
+ }
+
+ inline void Interface::free()
+ {
+ typedef InformationMap::iterator iterator;
+ typedef InformationMap::const_iterator const_iterator;
+ const const_iterator end = interfaces_.end();
+ for(iterator interfacePair = interfaces_.begin(); interfacePair != end; ++interfacePair) {
+ interfacePair->second.first.free();
+ interfacePair->second.second.free();
+ }
+ interfaces_.clear();
+ }
+
+ inline Interface::~Interface()
+ {
+ free();
+ }
+ /** @} */
+
+ inline std::ostream& operator<<(std::ostream& os, const Interface& interface)
+ {
+ typedef Interface::InformationMap InfoMap;
+ typedef InfoMap::const_iterator Iter;
+ for(Iter i=interface.interfaces().begin(), end = interface.interfaces().end();
+ i!=end; ++i)
+ {
+ os<<i->first<<": [ source=[";
+ for(std::size_t j=0; j < i->second.first.size(); ++j)
+ os<<i->second.first[j]<<" ";
+ os<<"] size="<<i->second.first.size()<<", target=[";
+ for(std::size_t j=0; j < i->second.second.size(); ++j)
+ os<<i->second.second[j]<<" ";
+ os<<"] size="<<i->second.second.size()<<"\n";
+ }
+ return os;
+ }
+}
+#endif // HAVE_MPI
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_COMMON_LOCALINDEX_HH
+#define DUNE_COMMON_LOCALINDEX_HH
+
+#include <cstddef>
+
+namespace Dune
+{
+
+
+ /** @addtogroup Common_Parallel
+ *
+ * @{
+ */
+ /**
+ * @file
+ * @brief Provides classes for use as the local index in ParallelIndexSet.
+ * @author Markus Blatt
+ */
+ /**
+ * @brief The states available for the local indices.
+ * @see LocalIndex::state()
+ */
+ enum LocalIndexState {VALID, DELETED};
+
+
+ /**
+ * @brief An index present on the local process.
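+ *
+ * A trivial usage sketch (the value 5 is illustrative only):
+ * \code
+ * Dune::LocalIndex idx(5); // local index 5, initial state VALID
+ * std::size_t i = idx.local(); // i == 5
+ * idx.setState(Dune::DELETED); // mark the index as deleted
+ * \endcode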
+ */
+ class LocalIndex
+ {
+ public:
+ /**
+ * @brief Default constructor.
+ */
+ LocalIndex() :
+ localIndex_(0), state_(VALID){}
+
+
+ /**
+ * @brief Constructor.
+ * @param index The value of the index.
+ */
+ LocalIndex(std::size_t index) :
+ localIndex_(index), state_(VALID){}
+ /**
+ * @brief get the local index.
+ * @return The local index.
+ */
+ inline const std::size_t& local() const;
+
+ /**
+ * @brief Convert to the local index represented by an int.
+ */
+ inline operator std::size_t() const;
+
+ /**
+ * @brief Assign a new local index.
+ *
+ * @param index The new local index.
+ */
+ inline LocalIndex& operator=(std::size_t index);
+
+ /**
+ * @brief Get the state.
+ * @return The state.
+ */
+ inline LocalIndexState state() const;
+
+ /**
+ * @brief Set the state.
+ * @param state The state to set.
+ */
+ inline void setState(LocalIndexState state);
+
+ private:
+ /** @brief The local index. */
+ std::size_t localIndex_;
+
+ /**
+ * @brief The state of the index.
+ *
+ * Has to be one of LocalIndexState!
+ * @see LocalIndexState.
+ */
+ char state_;
+
+ };
+
+
+
+ inline const std::size_t& LocalIndex::local() const {
+ return localIndex_;
+ }
+
+ inline LocalIndex::operator std::size_t() const {
+ return localIndex_;
+ }
+
+ inline LocalIndex& LocalIndex::operator=(std::size_t index){
+ localIndex_ = index;
+ return *this;
+ }
+
+ inline LocalIndexState LocalIndex::state() const {
+ return static_cast<LocalIndexState>(state_);
+ }
+
+ inline void LocalIndex::setState(LocalIndexState state){
+ state_ = static_cast<char>(state);
+ }
+
+ /** @} */
+
+} // namespace Dune
+
+#endif
--- /dev/null
+// Will be removed after the 2.7 release
+#warning "Deprecated header, use #include <dune/common/parallel/mpicommunication.hh> instead!"
+#include <dune/common/parallel/mpicommunication.hh>
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_PARALLEL_MPICOMMUNICATION_HH
+#define DUNE_COMMON_PARALLEL_MPICOMMUNICATION_HH
+
+/*!
+ \file
+ \brief Implements a utility class that provides
+ MPI's collective communication methods.
+
+ \ingroup ParallelCommunication
+ */
+
+#if HAVE_MPI
+
+#include <algorithm>
+#include <functional>
+#include <memory>
+
+#include <mpi.h>
+
+#include <dune/common/binaryfunctions.hh>
+#include <dune/common/exceptions.hh>
+#include <dune/common/parallel/communication.hh>
+#include <dune/common/parallel/mpitraits.hh>
+#include <dune/common/parallel/mpifuture.hh>
+#include <dune/common/parallel/mpidata.hh>
+
+namespace Dune
+{
+
+ //=======================================================
+ // use singleton pattern and template specialization to
+ // generate MPI operations
+ //=======================================================
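+
+ // For intrinsic types the ComposeMPIOp specializations below return the
+ // corresponding built-in MPI operation directly, e.g. (illustrative sketch only):
+ //
+ // MPI_Op op = Generic_MPI_Op<double, std::plus<double> >::get(); // yields MPI_SUM
+ //
+ // For all other type/function combinations a user-defined MPI_Op wrapping the
+ // binary function is created lazily on first use of get().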
+
+ template<typename Type, typename BinaryFunction, typename Enable=void>
+ class Generic_MPI_Op
+ {
+
+ public:
+ static MPI_Op get ()
+ {
+ if (!op)
+ {
+ op = std::make_unique<MPI_Op>();
+ // The following line leaks an MPI operation object, because the corresponding
+ //`MPI_Op_free` is never called. It is never called because there is no easy
+ // way to call it at the right moment: right before the call to MPI_Finalize.
+ // See https://gitlab.dune-project.org/core/dune-istl/issues/80
+ MPI_Op_create((void (*)(void*, void*, int*, MPI_Datatype*))&operation,true,op.get());
+ }
+ return *op;
+ }
+ private:
+ static void operation (Type *in, Type *inout, int *len, MPI_Datatype*)
+ {
+ BinaryFunction func;
+
+ for (int i=0; i< *len; ++i, ++in, ++inout) {
+ Type temp;
+ temp = func(*in, *inout);
+ *inout = temp;
+ }
+ }
+ Generic_MPI_Op () {}
+ Generic_MPI_Op (const Generic_MPI_Op& ) {}
+ static std::unique_ptr<MPI_Op> op;
+ };
+
+
+ template<typename Type, typename BinaryFunction, typename Enable>
+ std::unique_ptr<MPI_Op> Generic_MPI_Op<Type,BinaryFunction, Enable>::op;
+
+#define ComposeMPIOp(func,op) \
+ template<class T, class S> \
+ class Generic_MPI_Op<T, func<S>, std::enable_if_t<MPITraits<S>::is_intrinsic> >{ \
+ public: \
+ static MPI_Op get(){ \
+ return op; \
+ } \
+ private: \
+ Generic_MPI_Op () {} \
+ Generic_MPI_Op (const Generic_MPI_Op & ) {} \
+ }
+
+
+ ComposeMPIOp(std::plus, MPI_SUM);
+ ComposeMPIOp(std::multiplies, MPI_PROD);
+ ComposeMPIOp(Min, MPI_MIN);
+ ComposeMPIOp(Max, MPI_MAX);
+
+#undef ComposeMPIOp
+
+
+ //=======================================================
+ // specialization of Communication for MPI
+ //=======================================================
+
+ /*! \brief Specialization of Communication for MPI
+ \ingroup ParallelCommunication
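+
+ A minimal usage sketch (assumes MPIHelper::instance(argc, argv) has already been
+ called in main() so that MPI is initialized; the values are illustrative only):
+ \code
+ Dune::Communication<MPI_Comm> comm(MPI_COMM_WORLD);
+ double local = comm.rank() + 1.0;
+ double globalSum = comm.sum(local); // collective sum over all ranks
+ \endcode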
+ */
+ template<>
+ class Communication<MPI_Comm>
+ {
+ public:
+ //! Instantiation using a MPI communicator
+ Communication (const MPI_Comm& c = MPI_COMM_WORLD)
+ : communicator(c)
+ {
+ if(communicator!=MPI_COMM_NULL) {
+ int initialized = 0;
+ MPI_Initialized(&initialized);
+ if (!initialized)
+ DUNE_THROW(ParallelError,"You must call MPIHelper::instance(argc,argv) in your main() function before using the MPI Communication!");
+ MPI_Comm_rank(communicator,&me);
+ MPI_Comm_size(communicator,&procs);
+ }else{
+ procs=0;
+ me=-1;
+ }
+ }
+
+ //! @copydoc Communication::rank
+ int rank () const
+ {
+ return me;
+ }
+
+ //! @copydoc Communication::size
+ int size () const
+ {
+ return procs;
+ }
+
+ //! @copydoc Communication::send
+ template<class T>
+ int send(const T& data, int dest_rank, int tag) const
+ {
+ auto mpi_data = getMPIData(data);
+ return MPI_Send(mpi_data.ptr(), mpi_data.size(), mpi_data.type(),
+ dest_rank, tag, communicator);
+ }
+
+ //! @copydoc Communication::isend
+ template<class T>
+ MPIFuture<const T> isend(const T&& data, int dest_rank, int tag) const
+ {
+ MPIFuture<const T> future(std::forward<const T>(data));
+ auto mpidata = future.get_mpidata();
+ MPI_Isend(mpidata.ptr(), mpidata.size(), mpidata.type(),
+ dest_rank, tag, communicator, &future.req_);
+ return future;
+ }
+
+ //! @copydoc Communication::recv
+ template<class T>
+ T recv(T&& data, int source_rank, int tag, MPI_Status* status = MPI_STATUS_IGNORE) const
+ {
+ T lvalue_data(std::forward<T>(data));
+ auto mpi_data = getMPIData(lvalue_data);
+ MPI_Recv(mpi_data.ptr(), mpi_data.size(), mpi_data.type(),
+ source_rank, tag, communicator, status);
+ return lvalue_data;
+ }
+
+ //! @copydoc Communication::irecv
+ template<class T>
+ MPIFuture<T> irecv(T&& data, int source_rank, int tag) const
+ {
+ MPIFuture<T> future(std::forward<T>(data));
+ auto mpidata = future.get_mpidata();
+ MPI_Irecv(mpidata.ptr(), mpidata.size(), mpidata.type(),
+ source_rank, tag, communicator, &future.req_);
+ return future;
+ }
+
+ template<class T>
+ T rrecv(T&& data, int source_rank, int tag, MPI_Status* status = MPI_STATUS_IGNORE) const
+ {
+ MPI_Status _status;
+ MPI_Message _message;
+ T lvalue_data(std::forward<T>(data));
+ auto mpi_data = getMPIData(lvalue_data);
+ static_assert(!mpi_data.static_size, "rrecv works only for non-static-sized types.");
+ if(status == MPI_STATUS_IGNORE)
+ status = &_status;
+ MPI_Mprobe(source_rank, tag, communicator, &_message, status);
+ int size;
+ MPI_Get_count(status, mpi_data.type(), &size);
+ mpi_data.resize(size);
+ MPI_Mrecv(mpi_data.ptr(), mpi_data.size(), mpi_data.type(), &_message, status);
+ return lvalue_data;
+ }
+
+ //! @copydoc Communication::sum
+ template<typename T>
+ T sum (const T& in) const
+ {
+ T out;
+ allreduce<std::plus<T> >(&in,&out,1);
+ return out;
+ }
+
+ //! @copydoc Communication::sum
+ template<typename T>
+ int sum (T* inout, int len) const
+ {
+ return allreduce<std::plus<T> >(inout,len);
+ }
+
+ //! @copydoc Communication::prod
+ template<typename T>
+ T prod (const T& in) const
+ {
+ T out;
+ allreduce<std::multiplies<T> >(&in,&out,1);
+ return out;
+ }
+
+ //! @copydoc Communication::prod
+ template<typename T>
+ int prod (T* inout, int len) const
+ {
+ return allreduce<std::multiplies<T> >(inout,len);
+ }
+
+ //! @copydoc Communication::min
+ template<typename T>
+ T min (const T& in) const
+ {
+ T out;
+ allreduce<Min<T> >(&in,&out,1);
+ return out;
+ }
+
+ //! @copydoc Communication::min
+ template<typename T>
+ int min (T* inout, int len) const
+ {
+ return allreduce<Min<T> >(inout,len);
+ }
+
+
+ //! @copydoc Communication::max
+ template<typename T>
+ T max (const T& in) const
+ {
+ T out;
+ allreduce<Max<T> >(&in,&out,1);
+ return out;
+ }
+
+ //! @copydoc Communication::max
+ template<typename T>
+ int max (T* inout, int len) const
+ {
+ return allreduce<Max<T> >(inout,len);
+ }
+
+ //! @copydoc Communication::barrier
+ int barrier () const
+ {
+ return MPI_Barrier(communicator);
+ }
+
+ //! @copydoc Communication::ibarrier
+ MPIFuture<void> ibarrier () const
+ {
+ MPIFuture<void> future(true); // make a valid MPIFuture<void>
+ MPI_Ibarrier(communicator, &future.req_);
+ return future;
+ }
+
+
+ //! @copydoc Communication::broadcast
+ template<typename T>
+ int broadcast (T* inout, int len, int root) const
+ {
+ return MPI_Bcast(inout,len,MPITraits<T>::getType(),root,communicator);
+ }
+
+ //! @copydoc Communication::ibroadcast
+ template<class T>
+ MPIFuture<T> ibroadcast(T&& data, int root) const{
+ MPIFuture<T> future(std::forward<T>(data));
+ auto mpidata = future.get_mpidata();
+ MPI_Ibcast(mpidata.ptr(),
+ mpidata.size(),
+ mpidata.type(),
+ root,
+ communicator,
+ &future.req_);
+ return future;
+ }
+
+ //! @copydoc Communication::gather()
+ //! @note out must have space for P*len elements
+ template<typename T>
+ int gather (const T* in, T* out, int len, int root) const
+ {
+ return MPI_Gather(const_cast<T*>(in),len,MPITraits<T>::getType(),
+ out,len,MPITraits<T>::getType(),
+ root,communicator);
+ }
+
+ //! @copydoc Communication::igather
+ template<class TIN, class TOUT = std::vector<TIN>>
+ MPIFuture<TOUT, TIN> igather(TIN&& data_in, TOUT&& data_out, int root) const{
+ MPIFuture<TOUT, TIN> future(std::forward<TOUT>(data_out), std::forward<TIN>(data_in));
+ auto mpidata_in = future.get_send_mpidata();
+ auto mpidata_out = future.get_mpidata();
+ assert(root != me || mpidata_in.size()*procs <= mpidata_out.size());
+ int outlen = (me==root) * mpidata_in.size();
+ MPI_Igather(mpidata_in.ptr(), mpidata_in.size(), mpidata_in.type(),
+ mpidata_out.ptr(), outlen, mpidata_out.type(),
+ root, communicator, &future.req_);
+ return future;
+ }
+
+ //! @copydoc Communication::gatherv()
+ template<typename T>
+ int gatherv (const T* in, int sendDataLen, T* out, int* recvDataLen, int* displ, int root) const
+ {
+ return MPI_Gatherv(const_cast<T*>(in),sendDataLen,MPITraits<T>::getType(),
+ out,recvDataLen,displ,MPITraits<T>::getType(),
+ root,communicator);
+ }
+
+ //! @copydoc Communication::scatter()
+ //! @note out must have space for P*len elements
+ template<typename T>
+ int scatter (const T* sendData, T* recvData, int len, int root) const
+ {
+ return MPI_Scatter(const_cast<T*>(sendData),len,MPITraits<T>::getType(),
+ recvData,len,MPITraits<T>::getType(),
+ root,communicator);
+ }
+
+ //! @copydoc Communication::iscatter
+ template<class TIN, class TOUT = TIN>
+ MPIFuture<TOUT, TIN> iscatter(TIN&& data_in, TOUT&& data_out, int root) const
+ {
+ MPIFuture<TOUT, TIN> future(std::forward<TOUT>(data_out), std::forward<TIN>(data_in));
+ auto mpidata_in = future.get_send_mpidata();
+ auto mpidata_out = future.get_mpidata();
+ int inlen = (me==root) * mpidata_in.size()/procs;
+ MPI_Iscatter(mpidata_in.ptr(), inlen, mpidata_in.type(),
+ mpidata_out.ptr(), mpidata_out.size(), mpidata_out.type(),
+ root, communicator, &future.req_);
+ return future;
+ }
+
+ //! @copydoc Communication::scatterv()
+ template<typename T>
+ int scatterv (const T* sendData, int* sendDataLen, int* displ, T* recvData, int recvDataLen, int root) const
+ {
+ return MPI_Scatterv(const_cast<T*>(sendData),sendDataLen,displ,MPITraits<T>::getType(),
+ recvData,recvDataLen,MPITraits<T>::getType(),
+ root,communicator);
+ }
+
+
+ operator MPI_Comm () const
+ {
+ return communicator;
+ }
+
+ //! @copydoc Communication::allgather()
+ template<typename T, typename T1>
+ int allgather(const T* sbuf, int count, T1* rbuf) const
+ {
+ return MPI_Allgather(const_cast<T*>(sbuf), count, MPITraits<T>::getType(),
+ rbuf, count, MPITraits<T1>::getType(),
+ communicator);
+ }
+
+ //! @copydoc Communication::iallgather
+ template<class TIN, class TOUT = TIN>
+ MPIFuture<TOUT, TIN> iallgather(TIN&& data_in, TOUT&& data_out) const
+ {
+ MPIFuture<TOUT, TIN> future(std::forward<TOUT>(data_out), std::forward<TIN>(data_in));
+ auto mpidata_in = future.get_send_mpidata();
+ auto mpidata_out = future.get_mpidata();
+ assert(mpidata_in.size()*procs <= mpidata_out.size());
+ int outlen = mpidata_in.size();
+ MPI_Iallgather(mpidata_in.ptr(), mpidata_in.size(), mpidata_in.type(),
+ mpidata_out.ptr(), outlen, mpidata_out.type(),
+ communicator, &future.req_);
+ return future;
+ }
+
+ //! @copydoc Communication::allgatherv()
+ template<typename T>
+ int allgatherv (const T* in, int sendDataLen, T* out, int* recvDataLen, int* displ) const
+ {
+ return MPI_Allgatherv(const_cast<T*>(in),sendDataLen,MPITraits<T>::getType(),
+ out,recvDataLen,displ,MPITraits<T>::getType(),
+ communicator);
+ }
+
+ //! @copydoc Communication::allreduce(Type* inout,int len) const
+ template<typename BinaryFunction, typename Type>
+ int allreduce(Type* inout, int len) const
+ {
+ Type* out = new Type[len];
+ int ret = allreduce<BinaryFunction>(inout,out,len);
+ std::copy(out, out+len, inout);
+ delete[] out;
+ return ret;
+ }
+
+ template<typename BinaryFunction, typename Type>
+ Type allreduce(Type&& in) const{
+ Type lvalue_data = std::forward<Type>(in);
+ auto data = getMPIData(lvalue_data);
+ MPI_Allreduce(MPI_IN_PLACE, data.ptr(), data.size(), data.type(),
+ (Generic_MPI_Op<Type, BinaryFunction>::get()),
+ communicator);
+ return lvalue_data;
+ }
+
+ //! @copydoc Communication::iallreduce
+ template<class BinaryFunction, class TIN, class TOUT = TIN>
+ MPIFuture<TOUT, TIN> iallreduce(TIN&& data_in, TOUT&& data_out) const {
+ MPIFuture<TOUT, TIN> future(std::forward<TOUT>(data_out), std::forward<TIN>(data_in));
+ auto mpidata_in = future.get_send_mpidata();
+ auto mpidata_out = future.get_mpidata();
+ assert(mpidata_out.size() == mpidata_in.size());
+ assert(mpidata_out.type() == mpidata_in.type());
+ MPI_Iallreduce(mpidata_in.ptr(), mpidata_out.ptr(),
+ mpidata_out.size(), mpidata_out.type(),
+ (Generic_MPI_Op<TIN, BinaryFunction>::get()),
+ communicator, &future.req_);
+ return future;
+ }
+
+ //! @copydoc Communication::iallreduce
+ template<class BinaryFunction, class T>
+ MPIFuture<T> iallreduce(T&& data) const{
+ MPIFuture<T> future(std::forward<T>(data));
+ auto mpidata = future.get_mpidata();
+ MPI_Iallreduce(MPI_IN_PLACE, mpidata.ptr(),
+ mpidata.size(), mpidata.type(),
+ (Generic_MPI_Op<T, BinaryFunction>::get()),
+ communicator, &future.req_);
+ return future;
+ }
+
+ //! @copydoc Communication::allreduce(Type* in,Type* out,int len) const
+ template<typename BinaryFunction, typename Type>
+ int allreduce(const Type* in, Type* out, int len) const
+ {
+ return MPI_Allreduce(const_cast<Type*>(in), out, len, MPITraits<Type>::getType(),
+ (Generic_MPI_Op<Type, BinaryFunction>::get()),communicator);
+ }
+
+ private:
+ MPI_Comm communicator;
+ int me;
+ int procs;
+ };
+} // namespace Dune
+
+#endif // HAVE_MPI
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_COMMON_PARALLEL_MPIDATA_HH
+#define DUNE_COMMON_PARALLEL_MPIDATA_HH
+
+#include <vector>
+#include <string>
+
+#if HAVE_MPI
+
+#include <dune/common/typetraits.hh>
+#include <dune/common/std/type_traits.hh>
+#include <dune/common/parallel/mpitraits.hh>
+
+/** @addtogroup ParallelCommunication
+ *
+ * @{
+ */
+/**
+ * @file
+ *
+ * @brief Interface class to translate objects to an MPI_Datatype, void*
+ * and size as used by MPI calls.
+ *
+ * Furthermore it can be used to resize the wrapped object where supported,
+ * which makes it possible to receive a message of variable
+ * size. See `Communication::rrecv`.
+ *
+ * To 'register' a new dynamic type for MPI communication specialize `MPIData` or
+ * overload `getMPIData`.
+ *
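+ * A minimal usage sketch (illustrative only; assumes MPI has been
+ * initialized, e.g. via MPIHelper::instance):
+ * \code
+ * std::vector<double> v(10, 3.14);
+ * auto data = Dune::getMPIData(v);  // wraps v without copying
+ * // ptr(), size() and type() provide everything a plain MPI call needs
+ * MPI_Send(data.ptr(), data.size(), data.type(), 0, 0, MPI_COMM_WORLD);
+ * \endcode
+ *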
+ */
+
+namespace Dune{
+
+ template<class, class = void>
+ struct MPIData;
+
+ template<class T>
+ auto getMPIData(T& t){
+ return MPIData<T>(t);
+ }
+
+ // Default implementation for static datatypes
+ template<class T, class Enable>
+ struct MPIData
+ {
+ friend auto getMPIData<T>(T&);
+ protected:
+ T& data_;
+
+ MPIData(T& t)
+ : data_(t)
+ {}
+
+ public:
+ void* ptr() const {
+ return (void*)&data_;
+ }
+
+ // true if the size is static, i.e. the wrapped object cannot be resized
+ static constexpr bool static_size = true;
+
+ int size() const{
+ return 1;
+ }
+
+ MPI_Datatype type() const {
+ return MPITraits<std::decay_t<T>>::getType();
+ }
+ };
+
+ // dummy implementation for void
+ template<>
+ struct MPIData<void>{
+ protected:
+ MPIData() {}
+
+ public:
+ void* ptr(){
+ return nullptr;
+ }
+ int size(){
+ return 0;
+ }
+ void get(){}
+ MPI_Datatype type() const{
+ return MPI_INT;
+ }
+ };
+
+ // specializations:
+ // std::vector of static sized elements or std::string
+ template<class T>
+ struct MPIData<T, std::void_t<std::tuple<decltype(std::declval<T>().data()),
+ decltype(std::declval<T>().size()),
+ typename std::decay_t<T>::value_type>>>{
+ private:
+ template<class U>
+ using hasResizeOp = decltype(std::declval<U>().resize(0));
+
+ protected:
+ friend auto getMPIData<T>(T&);
+ MPIData(T& t)
+ : data_(t)
+ {}
+ public:
+ static constexpr bool static_size = std::is_const<T>::value || !Std::is_detected_v<hasResizeOp, T>;
+ void* ptr() {
+ return (void*) data_.data();
+ }
+ int size() {
+ return data_.size();
+ }
+ MPI_Datatype type() const{
+ return MPITraits<typename std::decay_t<T>::value_type>::getType();
+ }
+
+ template<class S = T>
+ auto /*void*/ resize(int size)
+ -> std::enable_if_t<!std::is_const<S>::value || !Std::is_detected_v<hasResizeOp, S>>
+ {
+ data_.resize(size);
+ }
+
+ protected:
+ T& data_;
+ };
+
+}
+
+/**
+ * @}
+ */
+
+#endif
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_PARALLEL_MPIFUTURE_HH
+#define DUNE_COMMON_PARALLEL_MPIFUTURE_HH
+
+#include <optional>
+
+#include <dune/common/parallel/communication.hh>
+#include <dune/common/parallel/future.hh>
+#include <dune/common/parallel/mpidata.hh>
+
+#if HAVE_MPI
+namespace Dune{
+
+ namespace impl{
+ template<class T>
+ struct Buffer{
+ Buffer(bool valid){
+ if(valid)
+ value = std::make_unique<T>();
+ }
+ template<class V>
+ Buffer(V&& t)
+ : value(std::make_unique<T>(std::forward<V>(t)))
+ {}
+ std::unique_ptr<T> value;
+ T get(){
+ T tmp = std::move(*value);
+ value.reset();
+ return tmp;
+ }
+ operator bool () const {
+ return (bool)value;
+ }
+ T& operator *() const{
+ return *value;
+ }
+ };
+
+ template<class T>
+ struct Buffer<T&>{
+ Buffer(bool valid = false)
+ {
+ if(valid)
+ value = T();
+ }
+ template<class V>
+ Buffer(V&& t)
+ : value(std::forward<V>(t))
+ {}
+ std::optional<std::reference_wrapper<T>> value;
+ T& get(){
+ T& tmp = *value;
+ value.reset();
+ return tmp;
+ }
+ operator bool () const{
+ return (bool)value;
+ }
+ T& operator *() const{
+ return *value;
+ }
+ };
+
+ template<>
+ struct Buffer<void>{
+ bool valid_;
+ Buffer(bool valid = false)
+ : valid_(valid)
+ {}
+ operator bool () const{
+ return valid_;
+ }
+ void get(){}
+ };
+ }
+
+ /*! \brief Provides a future-like object for MPI communication. It contains
+ the object that will be received and might also contain a sending object,
+ which must be kept alive until the communication has completed.
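+
+     A minimal usage sketch (illustrative only; assumes MPIHelper::instance
+     has been called):
+     \code
+     auto comm = Dune::MPIHelper::getCommunication();
+     std::vector<double> v(10, comm.rank());
+     auto future = comm.ibroadcast(std::move(v), 0); // non-blocking broadcast from rank 0
+     v = future.get();                               // waits and returns the received data
+     \endcode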
+ */
+ template<class R, class S = void>
+ class MPIFuture{
+ mutable MPI_Request req_;
+ mutable MPI_Status status_;
+ impl::Buffer<R> data_;
+ impl::Buffer<S> send_data_;
+ friend class Communication<MPI_Comm>;
+ public:
+ MPIFuture(bool valid = false)
+ : req_(MPI_REQUEST_NULL)
+ , data_(valid)
+ {}
+
+ // Hide this constructor if R or S is void
+ template<class V = R, class U = S>
+ MPIFuture(V&& recv_data, U&& send_data, typename std::enable_if_t<!std::is_void<V>::value && !std::is_void<U>::value>* = 0) :
+ req_(MPI_REQUEST_NULL)
+ , data_(std::forward<R>(recv_data))
+ , send_data_(std::forward<S>(send_data))
+ {}
+
+ // hide this constructor if R is void
+ template<class V = R>
+ MPIFuture(V&& recv_data, typename std::enable_if_t<!std::is_void<V>::value>* = 0)
+ : req_(MPI_REQUEST_NULL)
+ , data_(std::forward<V>(recv_data))
+ {}
+
+ ~MPIFuture() {
+ if(req_ != MPI_REQUEST_NULL){
+ try{ // might fail when it is a collective communication
+ MPI_Cancel(&req_);
+ MPI_Request_free(&req_);
+ }catch(...){
+ }
+ }
+ }
+
+ MPIFuture(MPIFuture&& f)
+ : req_(MPI_REQUEST_NULL)
+ , data_(std::move(f.data_))
+ , send_data_(std::move(f.send_data_))
+ {
+ std::swap(req_, f.req_);
+ std::swap(status_, f.status_);
+ }
+
+ MPIFuture& operator=(MPIFuture&& f){
+ std::swap(req_, f.req_);
+ std::swap(status_, f.status_);
+ std::swap(data_, f.data_);
+ std::swap(send_data_, f.send_data_);
+ return *this;
+ }
+
+ bool valid() const{
+ return (bool)data_;
+ }
+
+ void wait(){
+ if(!valid())
+ DUNE_THROW(InvalidFutureException, "The MPIFuture is not valid!");
+ MPI_Wait(&req_, &status_);
+ }
+
+ bool ready() const{
+ int flag = -1;
+ MPI_Test(&req_, &flag, &status_);
+ return flag;
+ }
+
+ R get() {
+ wait();
+ return data_.get();
+ }
+
+ S get_send_data(){
+ wait();
+ return send_data_.get();
+ }
+
+ auto get_mpidata(){
+ return getMPIData(*data_);
+ }
+
+ auto get_send_mpidata(){
+ return getMPIData(*send_data_);
+ }
+ };
+
+}
+#endif
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+/**
+ * @file
+ * @brief Implements a MPIGuard which detects an error on a remote process
+ * @author Christian Engwer
+ * @ingroup ParallelCommunication
+ */
+
+#ifndef DUNE_COMMON_MPIGUARD_HH
+#define DUNE_COMMON_MPIGUARD_HH
+
+#include "mpihelper.hh"
+#include "communication.hh"
+#include "mpicommunication.hh"
+#include <dune/common/exceptions.hh>
+
+namespace Dune
+{
+
+#ifndef DOXYGEN
+
+ /*
+ Interface class for the communication needed by MPIGuard
+ */
+ struct GuardCommunicator
+ {
+ // cleanup
+ virtual ~GuardCommunicator() {};
+ // all the communication methods we need
+ virtual int rank() = 0;
+ virtual int size() = 0;
+ virtual int sum(int i) = 0;
+ // create a new GuardCommunicator pointer
+ template <class C>
+ static GuardCommunicator * create(const CollectiveCommunication<C> & c);
+#if HAVE_MPI
+ inline
+ static GuardCommunicator * create(const MPI_Comm & c);
+#endif
+ };
+
+ namespace {
+ /*
+ templated implementation of different communication classes
+ */
+ // the default class will always fail, due to the missing implementation of "sum"
+ template <class Imp>
+ struct GenericGuardCommunicator
+ : public GuardCommunicator
+ {};
+ // specialization for Communication
+ template <class T>
+ struct GenericGuardCommunicator< Communication<T> >
+ : public GuardCommunicator
+ {
+ const Communication<T> comm;
+ GenericGuardCommunicator(const Communication<T> & c) :
+ comm(c) {}
+ int rank() override { return comm.rank(); };
+ int size() override { return comm.size(); };
+ int sum(int i) override { return comm.sum(i); }
+ };
+
+#if HAVE_MPI
+ // specialization for MPI_Comm
+ template <>
+ struct GenericGuardCommunicator<MPI_Comm>
+ : public GenericGuardCommunicator< Communication<MPI_Comm> >
+ {
+ GenericGuardCommunicator(const MPI_Comm & c) :
+ GenericGuardCommunicator< Communication<MPI_Comm> >(
+ Communication<MPI_Comm>(c)) {}
+ };
+#endif
+ } // anonymous namespace
+
+ template<class C>
+ GuardCommunicator * GuardCommunicator::create(const CollectiveCommunication<C> & comm)
+ {
+ return new GenericGuardCommunicator< CollectiveCommunication<C> >(comm);
+ }
+
+#if HAVE_MPI
+ GuardCommunicator * GuardCommunicator::create(const MPI_Comm & comm)
+ {
+ return new GenericGuardCommunicator< CollectiveCommunication<MPI_Comm> >(comm);
+ }
+#endif
+
+#endif
+
+ /*! @brief This exception is thrown if the MPIGuard detects an error on a remote process
+ @ingroup ParallelCommunication
+ */
+ class MPIGuardError : public ParallelError {};
+
+ /*! @brief detects a thrown exception and communicates to all other processes
+ @ingroup ParallelCommunication
+
+ @code
+ {
+ MPIGuard guard(...);
+
+ do_something();
+
+ // tell the guard that you successfully passed a critical operation
+ guard.finalize();
+ // reactivate the guard for the next critical operation
+ guard.reactivate();
+
+ int result = do_something_else();
+
+ // tell the guard the result of your operation
+ guard.finalize(result == success);
+ }
+ @endcode
+
+ You create an MPIGuard object. If an exception is raised on a
+ process, the MPIGuard detects it because the finalize
+ method was not called there. When the remaining processes reach their
+ finalize call they are informed that an error occurred, and the
+ MPIGuard throws an exception of type MPIGuardError.
+
+ @note You can initialize the MPIGuard from different types of communication objects:
+ - MPIHelper
+ - Communication
+ - MPI_Comm
+ */
+ class MPIGuard
+ {
+ GuardCommunicator * comm_;
+ bool active_;
+
+ // we don't want to copy this class
+ MPIGuard (const MPIGuard &);
+
+ public:
+ /*! @brief create an MPIGuard operating on the Communicator of the global Dune::MPIHelper
+
+ @param active should the MPIGuard be active upon creation?
+ */
+ MPIGuard (bool active=true) :
+ comm_(GuardCommunicator::create(
+ MPIHelper::getCommunication())),
+ active_(active)
+ {}
+
+ /*! @brief create an MPIGuard operating on the Communicator of a special Dune::MPIHelper m
+
+ @param m a reference to an MPIHelper
+ @param active should the MPIGuard be active upon creation?
+ */
+ MPIGuard (MPIHelper & m, bool active=true) :
+ comm_(GuardCommunicator::create(
+ m.getCommunication())),
+ active_(active)
+ {}
+
+ /*! @brief create an MPIGuard operating on an arbitrary communicator.
+
+ Supported types for the communication object are:
+ - MPIHelper
+ - Communication
+ - MPI_Comm
+
+ @param comm reference to a communication object
+ @param active should the MPIGuard be active upon creation?
+ */
+ template <class C>
+ MPIGuard (const C & comm, bool active=true) :
+ comm_(GuardCommunicator::create(comm)),
+ active_(active)
+ {}
+
+#if HAVE_MPI
+ MPIGuard (const MPI_Comm & comm, bool active=true) :
+ comm_(GuardCommunicator::create(comm)),
+ active_(active)
+ {}
+#endif
+
+ /*! @brief destroy the guard and check for undetected exceptions
+ */
+ ~MPIGuard()
+ {
+ if (active_)
+ {
+ active_ = false;
+ finalize(false);
+ }
+ delete comm_;
+ }
+
+ /*! @brief reactivate the guard.
+
+ If the guard is still active finalize(true) is called first.
+ */
+ void reactivate() {
+ if (active_ == true)
+ finalize();
+ active_ = true;
+ }
+
+ /*! @brief stop the guard.
+
+ If no success parameter is passed, the guard assumes that
+ everything worked as planned. All errors are communicated
+ and an exception of type MPIGuardError is thrown if an error
+ (or exception) occurred on any of the processors in the
+ communicator.
+
+ @param success inform the guard about possible errors
+ */
+ void finalize(bool success = true)
+ {
+ int result = success ? 0 : 1;
+ bool was_active = active_;
+ active_ = false;
+ result = comm_->sum(result);
+ if (result>0 && was_active)
+ {
+ DUNE_THROW(MPIGuardError, "Terminating process "
+ << comm_->rank() << " due to "
+ << result << " remote error(s)");
+ }
+ }
+ };
+
+}
+
+#endif // DUNE_COMMON_MPIGUARD_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_MPIHELPER
+#define DUNE_MPIHELPER
+
+#if HAVE_MPI
+#include <cassert>
+#include <mpi.h>
+#endif
+
+#include <mutex>
+
+#include <dune/common/parallel/communication.hh>
+#if HAVE_MPI
+#include <dune/common/parallel/mpicommunication.hh>
+#include <dune/common/stdstreams.hh>
+#endif
+#include <dune/common/visibility.hh>
+
+namespace Dune
+{
+ /**
+ * @file
+ * @brief Helpers for dealing with MPI.
+ *
+ * @ingroup ParallelCommunication
+ *
+ * Basically there are two helpers available:
+ * <dl>
+ * <dt>FakeMPIHelper</dt>
+ * <dd>A class adhering to the interface of MPIHelper
+ * that does not need MPI at all. This can be used
+ * to create a sequential program even if MPI is
+ * used to compile it.
+ * </dd>
+ * <dt>MPIHelper</dt>
+ * <dd>A real MPI helper. When the singleton
+ * gets instantiated MPI_Init will be
+ * called and before the program exits
+ * MPI_Finalize will be called.
+ * </dd>
+ * </dl>
+ *
+ * Example of how to use these classes:
+ *
+ * A program that is parallel if compiled with MPI
+ * and sequential otherwise:
+ * \code
+ * int main(int argc, char** argv){
+ * typedef Dune::MPIHelper MPIHelper;
+ * MPIHelper::instance(argc, argv);
+ * typename MPIHelper::MPICommunicator world =
+ * MPIHelper::getCommunicator();
+ * ...
+ * \endcode
+ *
+ * If one wants a sequential program even if the code is
+ * compiled with MPI, one simply has to exchange the typedef
+ * with \code typedef Dune::FakeMPIHelper MPIHelper; \endcode.
+ *
+ * To check whether we really use MPI or just the fake helper, please use
+ * MPIHelper::isFake (this is also possible at compile time!)
+ */
+ /**
+ * @brief A fake mpi helper.
+ *
+ * This helper can be used if no MPI is available
+ * or one wants to run sequentially even if MPI is
+ * available and used.
+ */
+ class FakeMPIHelper
+ {
+ public:
+ enum {
+ /**
+ * @brief Are we fake (i.e. we pretend to have MPI support but are compiled
+ * without it)?
+ */
+ isFake = true
+ };
+
+ /**
+ * @brief The type of the mpi communicator.
+ */
+ typedef No_Comm MPICommunicator;
+
+ /** \brief get the default communicator
+ *
+ * Return a communicator to exchange data with all processes
+ *
+ * \returns a fake communicator
+ */
+ DUNE_EXPORT static MPICommunicator getCommunicator ()
+ {
+ static MPICommunicator comm;
+ return comm;
+ }
+
+ /** \brief get a local communicator
+ *
+ * Returns a communicator to communicate with the local process only
+ *
+ * \returns a fake communicator
+ */
+ static MPICommunicator getLocalCommunicator ()
+ {
+ return getCommunicator();
+ }
+
+
+
+ // Will be deprecated after the 2.7 release
+ //[[deprecated("getCollectiveCommunication is deprecated. Use getCommunication instead.")]]
+ static Communication<MPICommunicator> getCollectiveCommunication()
+ {
+ return Communication<MPICommunicator>(getCommunicator());
+ }
+
+ static Communication<MPICommunicator>
+ getCommunication()
+ {
+ return Communication<MPICommunicator>(getCommunicator());
+ }
+
+ /**
+ * @brief Get the singleton instance of the helper.
+ *
+ * This method has to be called with the same arguments
+ * that the main method of the program was called:
+ * \code
+ * int main(int argc, char** argv){
+ * MPIHelper::instance(argc, argv);
+ * // program code comes here
+ * ...
+ * }
+ * \endcode
+ * @param argc The number of arguments provided to main.
+ * @param argv The arguments provided to main.
+ */
+ DUNE_EXPORT static FakeMPIHelper& instance([[maybe_unused]] int argc,
+ [[maybe_unused]] char** argv)
+ {
+ return instance();
+ }
+
+ DUNE_EXPORT static FakeMPIHelper& instance()
+ {
+ static FakeMPIHelper singleton;
+ return singleton;
+ }
+
+ /**
+ * @brief return rank of process, i.e. zero
+ */
+ int rank () const { return 0; }
+ /**
+ * @brief return number of processes, i.e. one
+ */
+ int size () const { return 1; }
+
+ private:
+ FakeMPIHelper() {}
+ FakeMPIHelper(const FakeMPIHelper&);
+ FakeMPIHelper& operator=(const FakeMPIHelper);
+ };
+
+#if HAVE_MPI
+ /**
+ * @brief A real mpi helper.
+ * @ingroup ParallelCommunication
+ *
+ * This helper should be used for parallel programs.
+ */
+ class MPIHelper
+ {
+ public:
+ enum {
+ /**
+ * @brief Are we fake (i.e. we pretend to have MPI support but are compiled
+ * without it)?
+ */
+ isFake = false
+ };
+
+ /**
+ * @brief The type of the mpi communicator.
+ */
+ typedef MPI_Comm MPICommunicator;
+
+ /** \brief get the default communicator
+ *
+ * Return a communicator to exchange data with all processes
+ *
+ * \returns MPI_COMM_WORLD
+ */
+ static MPICommunicator getCommunicator ()
+ {
+ return MPI_COMM_WORLD;
+ }
+
+ /** \brief get a local communicator
+ *
+ * Returns a communicator to exchange data with the local process only
+ *
+ * \returns MPI_COMM_SELF
+ */
+ static MPICommunicator getLocalCommunicator ()
+ {
+ return MPI_COMM_SELF;
+ }
+
+ // Will be deprecated after the 2.7 release
+ //[[deprecated("getCollectiveCommunication is deprecated. Use getCommunication instead.")]]
+ static Communication<MPICommunicator>
+ getCollectiveCommunication()
+ {
+ return Communication<MPICommunicator>(getCommunicator());
+ }
+
+ static Communication<MPICommunicator>
+ getCommunication()
+ {
+ return Communication<MPICommunicator>(getCommunicator());
+ }
+ /**
+ * @brief Get the singleton instance of the helper.
+ *
+ * This method has to be called with the same arguments
+ * that the main method of the program was called:
+ * \code
+ * int main(int argc, char** argv){
+ * MPIHelper::instance(argc, argv);
+ * // program code comes here
+ * ...
+ * }
+ * \endcode
+ * @param argc The number of arguments provided to main.
+ * @param argv The arguments provided to main.
+ */
+ DUNE_EXPORT static MPIHelper& instance(int& argc, char**& argv)
+ {
+ // create singleton instance
+ if (!instance_){
+ static std::mutex mutex;
+ std::lock_guard<std::mutex> guard(mutex);
+ if(!instance_)
+ instance_.reset(new MPIHelper(argc,argv));
+ }
+ return *instance_;
+ }
+
+ DUNE_EXPORT static MPIHelper& instance()
+ {
+ if(!instance_)
+ DUNE_THROW(InvalidStateException, "MPIHelper not initialized! Call MPIHelper::instance(argc, argv) with arguments first.");
+ return *instance_;
+ }
+
+ /**
+ * @brief return rank of process
+ */
+ int rank () const { return rank_; }
+ /**
+ * @brief return number of processes
+ */
+ int size () const { return size_; }
+
+ //! \brief calls MPI_Finalize
+ ~MPIHelper()
+ {
+ int wasFinalized = -1;
+ MPI_Finalized( &wasFinalized );
+ if(!wasFinalized && initializedHere_)
+ {
+ MPI_Finalize();
+ dverb << "Called MPI_Finalize on p=" << rank_ << "!" <<std::endl;
+ }
+
+ }
+
+ private:
+ int rank_;
+ int size_;
+ bool initializedHere_;
+ void prevent_warning(int){}
+ static inline std::unique_ptr<MPIHelper> instance_ = {};
+
+ //! \brief calls MPI_Init with argc and argv as parameters
+ MPIHelper(int& argc, char**& argv)
+ : initializedHere_(false)
+ {
+ int wasInitialized = -1;
+ MPI_Initialized( &wasInitialized );
+ if(!wasInitialized)
+ {
+ rank_ = -1;
+ size_ = -1;
+ static int is_initialized = MPI_Init(&argc, &argv);
+ prevent_warning(is_initialized);
+ initializedHere_ = true;
+ }
+
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank_);
+ MPI_Comm_size(MPI_COMM_WORLD,&size_);
+
+ assert( rank_ >= 0 );
+ assert( size_ >= 1 );
+
+ dverb << "Called MPI_Init on p=" << rank_ << "!" << std::endl;
+ }
+
+ MPIHelper(const MPIHelper&);
+ MPIHelper& operator=(const MPIHelper);
+ };
+#else // !HAVE_MPI
+ // We do not have MPI therefore FakeMPIHelper
+ // is the MPIHelper
+ /**
+ * @brief If no MPI is available FakeMPIHelper becomes the MPIHelper
+ * @ingroup ParallelCommunication
+ */
+ typedef FakeMPIHelper MPIHelper;
+
+#endif // !HAVE_MPI
+
+} // end namespace Dune
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+/**
+ * @file
+ *
+ * @brief See MPI_Pack.
+ *
+ * This wrapper class takes care of the
+ * memory management and provides methods to pack and unpack
+ * objects. All objects that can be used for MPI communication can
+ * also be packed into and unpacked from an MPIPack.
+ *
+ * @author Nils-Arne Dreier
+ * @ingroup ParallelCommunication
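+ *
+ * A usage sketch (illustrative only; assumes MPI has been initialized):
+ * \code
+ * Dune::MPIPack pack(Dune::MPIHelper::getCommunication());
+ * pack << 42 << std::vector<int>{1, 2, 3}; // pack an int and a dynamically sized vector
+ * pack.seek(0);                            // rewind before unpacking
+ * int i; std::vector<int> v;
+ * pack >> i >> v;                          // unpack in the same order
+ * \endcode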
+ */
+
+
+#ifndef DUNE_COMMON_PARALLEL_MPIPACK_HH
+#define DUNE_COMMON_PARALLEL_MPIPACK_HH
+
+#include <vector>
+#if HAVE_MPI
+#include <mpi.h>
+#include <dune/common/parallel/mpicommunication.hh>
+#include <dune/common/parallel/mpidata.hh>
+
+
+namespace Dune {
+
+ class MPIPack {
+ std::vector<char> _buffer;
+ int _position;
+ MPI_Comm _comm;
+
+ friend struct MPIData<MPIPack>;
+ friend struct MPIData<const MPIPack>;
+ public:
+ MPIPack(Communication<MPI_Comm> comm, std::size_t size = 0)
+ : _buffer(size)
+ , _position(0)
+ , _comm(comm)
+ {}
+
+ // It is not valid to copy an MPIPack, but it can be moved
+ MPIPack(const MPIPack&) = delete;
+ MPIPack& operator = (const MPIPack& other) = delete;
+ MPIPack(MPIPack&&) = default;
+ MPIPack& operator = (MPIPack&& other) = default;
+
+ /** @brief Packs the data into the object. Enlarges the internal buffer if
+ * necessary.
+ *
+ * @throw MPIError
+ */
+ template<class T>
+ void pack(const T& data){
+ auto mpidata = getMPIData(data);
+ int size = getPackSize(mpidata.size(), _comm, mpidata.type());
+ constexpr bool has_static_size = decltype(getMPIData(std::declval<T&>()))::static_size;
+ if(!has_static_size)
+ size += getPackSize(1, _comm, MPI_INT);
+ if (_position + size > 0 && size_t(_position + size) > _buffer.size()) // resize buffer if necessary
+ _buffer.resize(_position + size);
+ if(!has_static_size){
+ int size = mpidata.size();
+ MPI_Pack(&size, 1, MPI_INT, _buffer.data(), _buffer.size(),
+ &_position, _comm);
+ }
+ MPI_Pack(mpidata.ptr(), mpidata.size(),
+ mpidata.type(), _buffer.data(), _buffer.size(),
+ &_position, _comm);
+ }
+
+ /** @brief Unpacks data from the object
+ *
+ * @throw MPIError
+ */
+ template<class T>
+ auto /*void*/ unpack(T& data)
+ -> std::enable_if_t<decltype(getMPIData(data))::static_size, void>
+ {
+ auto mpidata = getMPIData(data);
+ MPI_Unpack(_buffer.data(), _buffer.size(), &_position,
+ mpidata.ptr(), mpidata.size(),
+ mpidata.type(), _comm);
+ }
+
+ /** @brief Unpacks data from the object
+ *
+ * @throw MPIError
+ */
+ template<class T>
+ auto /*void*/ unpack(T& data)
+ -> std::enable_if_t<!decltype(getMPIData(data))::static_size, void>
+ {
+ auto mpidata = getMPIData(data);
+ int size = 0;
+ MPI_Unpack(_buffer.data(), _buffer.size(), &_position,
+ &size, 1,
+ MPI_INT, _comm);
+ mpidata.resize(size);
+ MPI_Unpack(_buffer.data(), _buffer.size(), &_position,
+ mpidata.ptr(), mpidata.size(),
+ mpidata.type(), _comm);
+ }
+
+
+ //! @copydoc pack
+ template<typename T>
+ friend MPIPack& operator << (MPIPack& p, const T& t){
+ p.pack(t);
+ return p;
+ }
+
+ //! @copydoc unpack
+ template<typename T>
+ friend MPIPack& operator >> (MPIPack& p, T& t){
+ p.unpack(t);
+ return p;
+ }
+
+ //! @copydoc unpack
+ template<typename T>
+ MPIPack& read(T& t){
+ unpack(t);
+ return *this;
+ }
+
+ //! @copydoc pack
+ template<typename T>
+ MPIPack& write(const T& t){
+ pack(t);
+ return *this;
+ }
+
+ /** @brief Resizes the internal buffer.
+ \param size new size of internal buffer
+ */
+ void resize(size_t size){
+ _buffer.resize(size);
+ }
+
+ /** @brief Enlarges the internal buffer.
+ */
+ void enlarge(int s) {
+ _buffer.resize(_buffer.size() + s);
+ }
+
+ /** @brief Returns the size of the internal buffer.
+ */
+ size_t size() const {
+ return _buffer.size();
+ }
+
+ /** @brief Sets the position in the buffer where the next
+ * pack/unpack operation should take place.
+ */
+ void seek(int p){
+ _position = p;
+ }
+
+ /** @brief Gets the position in the buffer where the next
+ * pack/unpack operation should take place.
+ */
+ int tell() const{
+ return _position;
+ }
+
+ /** @brief Checks whether the end of the buffer is reached.
+ */
+ bool eof() const{
+ return std::size_t(_position)==_buffer.size();
+ }
+
+ /** @brief Returns the number of bytes needed to pack `len` elements of
+ * type `dt` into an MPIPack. See `MPI_Pack_size`.
+ */
+ static int getPackSize(int len, const MPI_Comm& comm, const MPI_Datatype& dt){
+ int size;
+ MPI_Pack_size(len, dt, comm, &size);
+ return size;
+ }
+
+ friend bool operator==(const MPIPack& a, const MPIPack& b) {
+ return a._buffer == b._buffer && a._comm == b._comm;
+ }
+ friend bool operator!=(const MPIPack& a, const MPIPack& b) {
+ return !(a==b);
+ }
+
+ };
+
+ template<class P>
+ struct MPIData<P, std::enable_if_t<std::is_same<std::remove_const_t<P>, MPIPack>::value>> {
+ protected:
+ friend auto getMPIData<P>(P& t);
+ MPIData(P& t) :
+ data_(t)
+ {}
+ public:
+ static constexpr bool static_size = std::is_const<P>::value;
+
+ void* ptr() {
+ return (void*) data_._buffer.data();
+ }
+
+ int size() {
+ return data_.size();
+ }
+
+ MPI_Datatype type() const{
+ return MPI_PACKED;
+ }
+
+ void resize(int size){
+ data_.resize(size);
+ }
+ protected:
+ P& data_;
+ };
+
+} // end namespace Dune
+
+#endif
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_MPITRAITS_HH
+#define DUNE_MPITRAITS_HH
+
+/** @addtogroup ParallelCommunication
+ *
+ * @{
+ */
+/**
+ * @file
+ * @brief Traits classes for mapping types onto MPI_Datatype.
+ * @author Markus Blatt
+ */
+
+#if HAVE_MPI
+
+#include <cstddef>
+#include <cstdint>
+#include <type_traits>
+#include <utility>
+#include <complex>
+
+#include <mpi.h>
+
+namespace Dune
+{
+ /**
+ * @brief A traits class describing the mapping of types onto MPI_Datatypes.
+ *
+ * Specializations exist for the default types.
+ * Specializations should provide a static method
+ * \code
+ * static MPI_Datatype getType();
+ * \endcode
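+ *
+ * For illustration, a hand-written specialization for a hypothetical POD
+ * struct might look like this (the struct and its layout are made up):
+ * \code
+ * struct Particle { double pos[3]; int id; };
+ *
+ * namespace Dune {
+ *   template<>
+ *   struct MPITraits<Particle> {
+ *     static MPI_Datatype getType() {
+ *       static MPI_Datatype type = MPI_DATATYPE_NULL;
+ *       if (type == MPI_DATATYPE_NULL) {
+ *         // treat the struct as an opaque block of bytes (not portable
+ *         // across heterogeneous machines, but fine for homogeneous clusters)
+ *         MPI_Type_contiguous(sizeof(Particle), MPI_BYTE, &type);
+ *         MPI_Type_commit(&type);
+ *       }
+ *       return type;
+ *     }
+ *   };
+ * }
+ * \endcode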
+ */
+ template<typename T>
+ struct MPITraits
+ {
+ private:
+ MPITraits(){}
+ MPITraits(const MPITraits&){}
+ static MPI_Datatype datatype;
+ static MPI_Datatype vectortype;
+ public:
+ static inline MPI_Datatype getType()
+ {
+ if(datatype==MPI_DATATYPE_NULL) {
+ MPI_Type_contiguous(sizeof(T),MPI_BYTE,&datatype);
+ MPI_Type_commit(&datatype);
+ }
+ return datatype;
+ }
+ static constexpr bool is_intrinsic = false;
+ };
+ template<class T>
+ MPI_Datatype MPITraits<T>::datatype = MPI_DATATYPE_NULL;
+
+#ifndef DOXYGEN
+
+ // A Macro for defining traits for the primitive data types
+#define ComposeMPITraits(p,m) \
+ template<> \
+ struct MPITraits<p>{ \
+ static inline MPI_Datatype getType(){ \
+ return m; \
+ } \
+ static constexpr bool is_intrinsic = true; \
+ }
+
+ ComposeMPITraits(char, MPI_CHAR);
+ ComposeMPITraits(unsigned char,MPI_UNSIGNED_CHAR);
+ ComposeMPITraits(short,MPI_SHORT);
+ ComposeMPITraits(unsigned short,MPI_UNSIGNED_SHORT);
+ ComposeMPITraits(int,MPI_INT);
+ ComposeMPITraits(unsigned int,MPI_UNSIGNED);
+ ComposeMPITraits(long,MPI_LONG);
+ ComposeMPITraits(unsigned long,MPI_UNSIGNED_LONG);
+ ComposeMPITraits(float,MPI_FLOAT);
+ ComposeMPITraits(double,MPI_DOUBLE);
+ ComposeMPITraits(long double,MPI_LONG_DOUBLE);
+ ComposeMPITraits(std::complex<double>, MPI_CXX_DOUBLE_COMPLEX);
+ ComposeMPITraits(std::complex<long double>, MPI_CXX_LONG_DOUBLE_COMPLEX);
+ ComposeMPITraits(std::complex<float>, MPI_CXX_FLOAT_COMPLEX);
+
+
+#undef ComposeMPITraits
+
+ template<class K, int n> class FieldVector;
+
+ template<class K, int n>
+ struct MPITraits<FieldVector<K,n> >
+ {
+ static MPI_Datatype datatype;
+ static MPI_Datatype vectortype;
+
+ static inline MPI_Datatype getType()
+ {
+ if(datatype==MPI_DATATYPE_NULL) {
+ MPI_Type_contiguous(n, MPITraits<K>::getType(), &vectortype);
+ MPI_Type_commit(&vectortype);
+ FieldVector<K,n> fvector;
+ MPI_Aint base;
+ MPI_Aint displ;
+ MPI_Get_address(&fvector, &base);
+ MPI_Get_address(&(fvector[0]), &displ);
+ displ -= base;
+ int length[1]={1};
+
+ MPI_Type_create_struct(1, length, &displ, &vectortype, &datatype);
+ MPI_Type_commit(&datatype);
+ }
+ return datatype;
+ }
+
+ };
+
+ template<class K, int n>
+ MPI_Datatype MPITraits<FieldVector<K,n> >::datatype = MPI_DATATYPE_NULL;
+ template<class K, int n>
+ MPI_Datatype MPITraits<FieldVector<K,n> >::vectortype = {MPI_DATATYPE_NULL};
+
+
+ template<int k>
+ class bigunsignedint;
+
+ template<int k>
+ struct MPITraits<bigunsignedint<k> >
+ {
+ static MPI_Datatype datatype;
+ static MPI_Datatype vectortype;
+
+ static inline MPI_Datatype getType()
+ {
+ if(datatype==MPI_DATATYPE_NULL) {
+ MPI_Type_contiguous(bigunsignedint<k>::n, MPITraits<std::uint16_t>::getType(),
+ &vectortype);
+ //MPI_Type_commit(&vectortype);
+ bigunsignedint<k> data;
+ MPI_Aint base;
+ MPI_Aint displ;
+ MPI_Get_address(&data, &base);
+ MPI_Get_address(&(data.digit), &displ);
+ displ -= base;
+ int length[1]={1};
+ MPI_Type_create_struct(1, length, &displ, &vectortype, &datatype);
+ MPI_Type_commit(&datatype);
+ }
+ return datatype;
+ }
+ };
+}
+
+namespace Dune
+{
+ template<int k>
+ MPI_Datatype MPITraits<bigunsignedint<k> >::datatype = MPI_DATATYPE_NULL;
+ template<int k>
+ MPI_Datatype MPITraits<bigunsignedint<k> >::vectortype = MPI_DATATYPE_NULL;
+
+ template<typename T1, typename T2>
+ struct MPITraits<std::pair<T1,T2 > >
+ {
+ public:
+ inline static MPI_Datatype getType();
+ private:
+ static MPI_Datatype type;
+ };
+ template<typename T1, typename T2>
+ MPI_Datatype MPITraits<std::pair<T1,T2> >::getType()
+ {
+ if(type==MPI_DATATYPE_NULL) {
+ int length[2] = {1, 1};
+ MPI_Aint disp[2];
+ MPI_Datatype types[2] = {MPITraits<T1>::getType(),
+ MPITraits<T2>::getType()};
+
+ using Pair = std::pair<T1, T2>;
+ static_assert(std::is_standard_layout<Pair>::value, "offsetof() is only defined for standard layout types");
+ disp[0] = offsetof(Pair, first);
+ disp[1] = offsetof(Pair, second);
+
+ MPI_Datatype tmp;
+ MPI_Type_create_struct(2, length, disp, types, &tmp);
+
+ MPI_Type_create_resized(tmp, 0, sizeof(Pair), &type);
+ MPI_Type_commit(&type);
+
+ MPI_Type_free(&tmp);
+ }
+ return type;
+ }
+
+ template<typename T1, typename T2>
+ MPI_Datatype MPITraits<std::pair<T1,T2> >::type=MPI_DATATYPE_NULL;
+
+#endif // !DOXYGEN
+
+} // namespace Dune
+
+#endif // HAVE_MPI
+
+/** @} group ParallelCommunication */
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_PLOCALINDEX_HH
+#define DUNE_PLOCALINDEX_HH
+
+#include "localindex.hh"
+#include "indexset.hh"
+#include "mpitraits.hh"
+
+#include <iostream>
+
+namespace Dune
+{
+
+
+ /** @addtogroup Common_Parallel
+ *
+ * @{
+ */
+ /**
+ * @file
+ * @brief Provides classes for use as the local index in ParallelIndexSet for distributed computing.
+ * @author Markus Blatt
+ */
+
+ template<class T> class ParallelLocalIndex;
+
+ /**
+ * @brief Print the local index to a stream.
+ * @param os The output stream to print to.
+ * @param index The index to print.
+ */
+ template<class T>
+ std::ostream& operator<<(std::ostream& os, const ParallelLocalIndex<T>& index)
+ {
+ os<<"{local="<<index.localIndex_<<", attr="<<T(index.attribute_)<<", public="
+ <<(index.public_ ? true : false)<<"}";
+ return os;
+ }
+
+ /**
+ * @brief An index present on the local process with an additional attribute flag.
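+ *
+ * For example (the attribute enum is only illustrative):
+ * \code
+ * enum Attr { owner, overlap };
+ * Dune::ParallelLocalIndex<Attr> idx(5, owner, true); // local index 5, owned, public
+ * \endcode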
+ */
+ template<typename T>
+ class ParallelLocalIndex
+ {
+#if HAVE_MPI
+ // friend declaration needed for MPITraits
+ friend struct MPITraits<ParallelLocalIndex<T> >;
+#endif
+ friend std::ostream& operator<<<>(std::ostream& os, const ParallelLocalIndex<T>& index);
+
+ public:
+ /**
+ * @brief The type of the attributes.
+ * Normally this will be an enumeration like
+ * <pre>
+ * enum Attributes{owner, border, overlap};
+ * </pre>
+ */
+ typedef T Attribute;
+ /**
+ * @brief Constructor.
+ *
+ * The local index will be initialized to 0.
+ * @param attribute The attribute of the index.
+ * @param isPublic True if the index might also be
+ * known to other processes.
+ */
+ ParallelLocalIndex(const Attribute& attribute, bool isPublic);
+
+ /**
+ * @brief Constructor.
+ *
+ * @param localIndex The local index.
+ * @param attribute The attribute of the index.
+ * @param isPublic True if the index might also be
+ * known to other processes.
+ */
+ ParallelLocalIndex(size_t localIndex, const Attribute& attribute, bool isPublic=true);
+ /**
+ * @brief Parameterless constructor.
+ *
+ * Needed for use in container classes.
+ */
+ ParallelLocalIndex();
+
+#if 0
+ /**
+ * @brief Constructor.
+ * @param globalIndex The global index.
+ * @param attribute The attribute of the index.
+ * @param local The local index.
+ * @param isPublic True if the index might also be
+ * known to other processes.
+ *
+ */
+ ParallelLocalIndex(const Attribute& attribute, size_t local, bool isPublic);
+#endif
+
+ /**
+ * @brief Get the attribute of the index.
+ * @return The associated attribute.
+ */
+ inline const Attribute attribute() const;
+
+ /**
+ * @brief Set the attribute of the index.
+ * @param attribute The associated attribute.
+ */
+ inline void setAttribute(const Attribute& attribute);
+
+ /**
+ * @brief get the local index.
+ * @return The local index.
+ */
+ inline size_t local() const;
+
+ /**
+ * @brief Convert to the local index represented by a size_t.
+ */
+ inline operator size_t() const;
+
+ /**
+ * @brief Assign a new local index.
+ *
+ * @param index The new local index.
+ */
+ inline ParallelLocalIndex<Attribute>& operator=(size_t index);
+
+ /**
+ * @brief Check whether the index might also be known to other processes.
+ * @return True if the index might be known to other processors.
+ */
+ inline bool isPublic() const;
+
+ /**
+ * @brief Get the state.
+ * @return The state.
+ */
+ inline LocalIndexState state() const;
+
+ /**
+ * @brief Set the state.
+ * @param state The state to set.
+ */
+ inline void setState(const LocalIndexState& state);
+
+ private:
+ /** @brief The local index. */
+ size_t localIndex_;
+
+ /** @brief An attribute for the index. */
+ char attribute_;
+
+ /** @brief True if the index is also known to other processors. */
+ char public_;
+
+ /**
+ * @brief The state of the index.
+ *
+ * Has to be one of LocalIndexState!
+ * @see LocalIndexState.
+ */
+ char state_;
+
+ };
+
+ template<typename T>
+ bool operator==(const ParallelLocalIndex<T>& p1,
+ const ParallelLocalIndex<T>& p2)
+ {
+ if(p1.local()!=p2.local())
+ return false;
+ if(p1.attribute()!=p2.attribute())
+ return false;
+ if(p1.isPublic()!=p2.isPublic())
+ return false;
+ return true;
+ }
+ template<typename T>
+ bool operator!=(const ParallelLocalIndex<T>& p1,
+ const ParallelLocalIndex<T>& p2)
+ {
+ return !(p1==p2);
+ }
+
+
+ template<typename T>
+ struct LocalIndexComparator<ParallelLocalIndex<T> >
+ {
+ static bool compare(const ParallelLocalIndex<T>& t1,
+ const ParallelLocalIndex<T>& t2){
+ return t1.attribute()<t2.attribute();
+ }
+ };
+
+
+#if HAVE_MPI
+
+ //! \todo Please doc me!
+ template<typename T>
+ class MPITraits<ParallelLocalIndex<T> >
+ {
+ public:
+ static MPI_Datatype getType();
+ private:
+ static MPI_Datatype type;
+
+ };
+
+#endif
+
+ template<class T>
+ ParallelLocalIndex<T>::ParallelLocalIndex(const T& attribute, bool isPublic)
+ : localIndex_(0), attribute_(static_cast<char>(attribute)),
+ public_(static_cast<char>(isPublic)), state_(static_cast<char>(VALID))
+ {}
+
+
+ template<class T>
+ ParallelLocalIndex<T>::ParallelLocalIndex(size_t local, const T& attribute, bool isPublic)
+ : localIndex_(local), attribute_(static_cast<char>(attribute)),
+ public_(static_cast<char>(isPublic)), state_(static_cast<char>(VALID))
+ {}
+
+ template<class T>
+ ParallelLocalIndex<T>::ParallelLocalIndex()
+ : localIndex_(0), attribute_(), public_(static_cast<char>(false)),
+ state_(static_cast<char>(VALID))
+ {}
+
+ template<class T>
+ inline const T ParallelLocalIndex<T>::attribute() const
+ {
+ return T(attribute_);
+ }
+
+ template<class T>
+ inline void
+ ParallelLocalIndex<T>::setAttribute(const Attribute& attribute)
+ {
+ attribute_ = attribute;
+ }
+
+ template<class T>
+ inline size_t ParallelLocalIndex<T>::local() const
+ {
+ return localIndex_;
+ }
+
+ template<class T>
+ inline ParallelLocalIndex<T>::operator size_t() const
+ {
+ return localIndex_;
+ }
+
+ template<class T>
+ inline ParallelLocalIndex<T>&
+ ParallelLocalIndex<T>::operator=(size_t index)
+ {
+ localIndex_=index;
+ return *this;
+ }
+
+ template<class T>
+ inline bool ParallelLocalIndex<T>::isPublic() const
+ {
+ return static_cast<bool>(public_);
+ }
+
+ template<class T>
+ inline LocalIndexState ParallelLocalIndex<T>::state() const
+ {
+ return LocalIndexState(state_);
+ }
+
+ template<class T>
+ inline void ParallelLocalIndex<T>::setState(const LocalIndexState& state)
+ {
+ state_=static_cast<char>(state);
+ }
+
+#if HAVE_MPI
+
+ template<typename T>
+ MPI_Datatype MPITraits<ParallelLocalIndex<T> >::getType()
+ {
+
+ if(type==MPI_DATATYPE_NULL) {
+ int length = 1;
+ MPI_Aint base, disp;
+ MPI_Datatype types[1] = {MPITraits<char>::getType()};
+ ParallelLocalIndex<T> rep;
+ MPI_Get_address(&rep, &base);
+ MPI_Get_address(&(rep.attribute_), &disp);
+ disp -= base;
+
+ MPI_Datatype tmp;
+ MPI_Type_create_struct(1, &length, &disp, types, &tmp);
+
+ MPI_Type_create_resized(tmp, 0, sizeof(ParallelLocalIndex<T>), &type);
+ MPI_Type_commit(&type);
+
+ MPI_Type_free(&tmp);
+ }
+ return type;
+ }
+
+ template<typename T>
+ MPI_Datatype MPITraits<ParallelLocalIndex<T> >::type = MPI_DATATYPE_NULL;
+
+#endif
+
+
+ /** @} */
+} // namespace Dune
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_REMOTEINDICES_HH
+#define DUNE_REMOTEINDICES_HH
+
+#if HAVE_MPI
+
+#include <cassert>
+#include <iostream>
+#include <ostream>
+#include <map>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include <mpi.h>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/parallel/indexset.hh>
+#include <dune/common/parallel/mpitraits.hh>
+#include <dune/common/parallel/plocalindex.hh>
+#include <dune/common/sllist.hh>
+#include <dune/common/stdstreams.hh>
+
+namespace Dune {
+ /** @addtogroup Common_Parallel
+ *
+ * @{
+ */
+ /**
+ * @file
+ * @brief Classes describing a distributed indexset
+ * @author Markus Blatt
+ */
+
+ //! \todo Please doc me!
+ template<typename TG, typename TA>
+ class MPITraits<IndexPair<TG,ParallelLocalIndex<TA> > >
+ {
+ public:
+ inline static MPI_Datatype getType();
+ private:
+ static MPI_Datatype type;
+ };
+
+
+ template<typename T, typename A>
+ class RemoteIndices;
+
+ template<typename T1, typename T2>
+ class RemoteIndex;
+
+ // forward declaration needed for friend declaration.
+ template<typename T>
+ class IndicesSyncer;
+
+ template<typename T1, typename T2>
+ std::ostream& operator<<(std::ostream& os, const RemoteIndex<T1,T2>& index);
+
+
+ template<typename T, typename A, bool mode>
+ class RemoteIndexListModifier;
+
+
+ /**
+ * @brief Information about an index residing on another processor.
+ */
+ template<typename T1, typename T2>
+ class RemoteIndex
+ {
+ template<typename T>
+ friend class IndicesSyncer;
+
+ template<typename T, typename A, typename A1>
+ friend void repairLocalIndexPointers(std::map<int,SLList<std::pair<typename T::GlobalIndex, typename T::LocalIndex::Attribute>,A> >&,
+ RemoteIndices<T,A1>&,
+ const T&);
+
+ template<typename T, typename A, bool mode>
+ friend class RemoteIndexListModifier;
+
+ public:
+ /**
+ * @brief the type of the global index.
+ * This type has to provide at least a operator< for sorting.
+ */
+ typedef T1 GlobalIndex;
+ /**
+ * @brief The type of the attributes.
+ * Normally this will be an enumeration like
+ * \code
+ * enum Attributes{owner, border, overlap}
+ * \endcode
+ * e.g. OwnerOverlapCopyAttributes.
+ */
+ typedef T2 Attribute;
+
+ /**
+ * @brief The type of the index pair.
+ */
+ typedef IndexPair<GlobalIndex,ParallelLocalIndex<Attribute> >
+ PairType;
+
+ /**
+ * @brief Get the attribute of the index on the remote process.
+ * @return The remote attribute.
+ */
+ const Attribute attribute() const;
+
+ /**
+ * @brief Get the corresponding local index pair.
+ * @return The corresponding local index pair.
+ */
+
+ const PairType& localIndexPair() const;
+
+ /**
+ * @brief Parameterless Constructor.
+ */
+ RemoteIndex();
+
+
+ /**
+ * @brief Constructor.
+ * @param attribute The attribute of the index on the remote processor.
+ * @param local The corresponding local index.
+ */
+ RemoteIndex(const T2& attribute,
+ const PairType* local);
+
+
+ /**
+ * @brief Constructor.
+ * Should only be called from within the parallel index set implementation.
+ * @param attribute The attribute of the index on the remote processor.
+ */
+ RemoteIndex(const T2& attribute);
+
+ bool operator==(const RemoteIndex& ri) const;
+
+ bool operator!=(const RemoteIndex& ri) const;
+ private:
+ /** @brief The corresponding local index for this process. */
+ const PairType* localIndex_;
+
+ /** @brief The attribute of the index on the other process. */
+ char attribute_;
+ };
+
+ template<class T, class A>
+ std::ostream& operator<<(std::ostream& os, const RemoteIndices<T,A>& indices);
+
+ class InterfaceBuilder;
+
+ template<class T, class A>
+ class CollectiveIterator;
+
+ // forward declaration needed for friend declaration.
+ template<class T>
+ class IndicesSyncer;
+
+ // forward declaration needed for friend declaration.
+ template<typename T1, typename T2>
+ class OwnerOverlapCopyCommunication;
+
+
+ /**
+ * @brief The indices present on remote processes.
+ *
+ * To set up communication between the set of processes active in
+ * the communication every process needs to know which
+ * indices are also known to other processes and which attributes
+ * are attached to them on the remote side.
+ *
+ * This information is managed by this class. The information can either
+ * be computed automatically by calling rebuild (which requires information
+ * to be sent in a ring) or set up by hand using the
+ * RemoteIndexListModifiers returned by function getModifier(int).
+ *
+ * @tparam T The type of the underlying index set.
+ * @tparam A The type of the allocator to use.
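+ *
+ * A rough usage sketch (the attribute enum and index types are
+ * illustrative only):
+ * \code
+ * enum Attr { owner, overlap };
+ * using LI  = Dune::ParallelLocalIndex<Attr>;
+ * using PIS = Dune::ParallelIndexSet<int, LI>;
+ * PIS source;              // filled elsewhere with global/local index pairs
+ * Dune::RemoteIndices<PIS> remote(source, source, MPI_COMM_WORLD);
+ * remote.rebuild<true>();  // exchange index information, treating all indices as public
+ * \endcode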
+ */
+ template<class T, class A=std::allocator<RemoteIndex<typename T::GlobalIndex,
+ typename T::LocalIndex::Attribute> > >
+ class RemoteIndices
+ {
+ friend class InterfaceBuilder;
+ friend class IndicesSyncer<T>;
+ template<typename T1, typename A2, typename A1>
+ friend void repairLocalIndexPointers(std::map<int,SLList<std::pair<typename T1::GlobalIndex, typename T1::LocalIndex::Attribute>,A2> >&,
+ RemoteIndices<T1,A1>&,
+ const T1&);
+
+ template<class G, class T1, class T2>
+ friend void fillIndexSetHoles(const G& graph, Dune::OwnerOverlapCopyCommunication<T1,T2>& oocomm);
+ friend std::ostream& operator<<<>(std::ostream&, const RemoteIndices<T>&);
+
+ public:
+
+ /**
+ * @brief Type of the index set we use, e.g. ParallelIndexSet.
+ */
+ typedef T ParallelIndexSet;
+
+ /**
+ * @brief The type of the collective iterator over all remote indices. */
+ typedef CollectiveIterator<T,A> CollectiveIteratorT;
+
+ /**
+ * @brief The type of the global index.
+ */
+ typedef typename ParallelIndexSet::GlobalIndex GlobalIndex;
+
+
+ /**
+ * @brief The type of the local index.
+ */
+ typedef typename ParallelIndexSet::LocalIndex LocalIndex;
+
+ /**
+ * @brief The type of the attribute.
+ */
+ typedef typename LocalIndex::Attribute Attribute;
+
+ /**
+ * @brief Type of the remote indices we manage.
+ */
+ typedef Dune::RemoteIndex<GlobalIndex,Attribute> RemoteIndex;
+
+
+ /**
+ * @brief The type of the allocator for the remote index list.
+ */
+ using Allocator = typename std::allocator_traits<A>::template rebind_alloc<RemoteIndex>;
+
+ /** @brief The type of the remote index list. */
+ typedef Dune::SLList<RemoteIndex,Allocator>
+ RemoteIndexList;
+
+ /** @brief The type of the map from rank to remote index list. */
+ typedef std::map<int, std::pair<RemoteIndexList*,RemoteIndexList*> >
+ RemoteIndexMap;
+
+ typedef typename RemoteIndexMap::const_iterator const_iterator;
+
+ /**
+ * @brief Constructor.
+ * @param comm The communicator to use.
+ * @param source The indexset which represents the global to
+ * local mapping at the source of the communication
+ * @param destination The indexset which represents the global to
+ * local mapping at the destination of the communication.
+ * May be the same as the source indexset.
+ * @param neighbours Optional: The neighbours the process shares indices with.
+ * If this parameter is omitted a ring communication with all indices will take
+ * place to calculate this information which is O(P).
+ * @param includeSelf If true, sending from indices of the processor to other
+ * indices on the same processor is enabled even if the same indexset is used
+ * on both the sending and receiving side.
+ */
+ inline RemoteIndices(const ParallelIndexSet& source, const ParallelIndexSet& destination,
+ const MPI_Comm& comm, const std::vector<int>& neighbours=std::vector<int>(), bool includeSelf=false);
+
+ RemoteIndices();
+
+ /**
+ * @brief Tell whether sending from indices of the processor to other
+ * indices on the same processor is enabled even if the same indexset is
+ * used on both the sending and receiving side.
+ *
+ * @param includeSelf If true it is enabled.
+ */
+ void setIncludeSelf(bool includeSelf);
+
+ /**
+ * @brief Set the index sets and communicator we work with.
+ *
+ * @warning All remote indices already setup will be deleted!
+ *
+ * @param comm The communicator to use.
+ * @param source The indexset which represents the global to
+ * local mapping at the source of the communication
+ * @param destination The indexset which represents the global to
+ * local mapping at the destination of the communication.
+ * May be the same as the source indexset.
+ * @param neighbours Optional: The neighbours the process shares indices with.
+ * If this parameter is omitted a ring communication with all indices will take
+ * place to calculate this information which is O(P).
+ */
+ void setIndexSets(const ParallelIndexSet& source, const ParallelIndexSet& destination,
+ const MPI_Comm& comm, const std::vector<int>& neighbours=std::vector<int>());
+
+ template<typename C>
+ void setNeighbours(const C& neighbours)
+ {
+ neighbourIds.clear();
+ neighbourIds.insert(neighbours.begin(), neighbours.end());
+
+ }
+
+ const std::set<int>& getNeighbours() const
+ {
+ return neighbourIds;
+ }
+
+ /**
+ * @brief Destructor.
+ */
+ ~RemoteIndices();
+
+ /**
+ * @brief Rebuilds the set of remote indices.
+ *
+ * This has to be called whenever the underlying index sets
+ * change.
+ *
+ * If the template parameter ignorePublic is true all indices will be treated
+ * as public.
+ */
+ template<bool ignorePublic>
+ void rebuild();
+
+ bool operator==(const RemoteIndices& ri) const;
+
+ /**
+ * @brief Checks whether the remote indices are synced with
+ * the indexsets.
+ *
+ * If they are not synced the remote indices need to be rebuild.
+ * @return True if they are synced.
+ */
+ inline bool isSynced() const;
+
+ /**
+ * @brief Get the mpi communicator used.
+ */
+ inline MPI_Comm communicator() const;
+
+ /**
+ * @brief Get a modifier for a remote index list.
+ *
+ * Sometimes the user knows in advance which indices will be present
+ * on other processors, too. Then he can set them up using this modifier.
+ *
+ * @warning Use with care. If the remote index list is inconsistent
+ * after the modification the communication might result in a dead lock!
+ *
+ * @tparam mode If true the index set corresponding to the remote indices might get modified.
+ * Therefore the internal pointers to the indices need to be repaired.
+ * @tparam send If true the remote index information at the sending side will
+ * be modified, if false the receiving side.
+ */
+ template<bool mode, bool send>
+ inline RemoteIndexListModifier<T,A,mode> getModifier(int process);
+
+ /**
+ * @brief Find an iterator over the remote index lists of a specific process.
+ * @param proc The identifier of the process.
+ * @return The iterator over the remote index lists positioned at the process.
+ * If there is no list for this process, the end iterator is returned.
+ */
+ inline const_iterator find(int proc) const;
+
+ /**
+ * @brief Get an iterator over all remote index lists.
+ * @return The iterator over all remote index lists positioned at the first process.
+ */
+ inline const_iterator begin() const;
+
+ /**
+ * @brief Get an iterator over all remote index lists.
+ * @return The iterator over all remote index lists positioned at the end.
+ */
+ inline const_iterator end() const;
+
+ /**
+ * @brief Get an iterator for collectively iterating over the remote indices of all remote processes.
+ */
+ template<bool send>
+ inline CollectiveIteratorT iterator() const;
+
+ /**
+ * @brief Free the index lists.
+ */
+ inline void free();
+
+ /**
+ * @brief Get the number of processors we share indices with.
+ * @return The number of neighbours.
+ */
+ inline int neighbours() const;
+
+ /** @brief Get the index set at the source. */
+ inline const ParallelIndexSet& sourceIndexSet() const;
+
+ /** @brief Get the index set at destination. */
+ inline const ParallelIndexSet& destinationIndexSet() const;
+
+ private:
+ /** copying is forbidden. */
+ RemoteIndices(const RemoteIndices&) = delete;
+
+ /** @brief Index set used at the source of the communication. */
+ const ParallelIndexSet* source_;
+
+ /** @brief Index set used at the destination of the communication. */
+ const ParallelIndexSet* target_;
+
+ /** @brief The communicator to use.*/
+ MPI_Comm comm_;
+
+ /** @brief The neighbours we share indices with.
+ * If not empty this will speed up rebuild. */
+ std::set<int> neighbourIds;
+
+ /** @brief The communicator tag to use. */
+ const static int commTag_=333;
+
+ /**
+ * @brief The sequence number of the source index set when the remote indices
+ * were built.
+ */
+ int sourceSeqNo_;
+
+ /**
+ * @brief The sequence number of the destination index set when the remote indices
+ * were built.
+ */
+ int destSeqNo_;
+
+ /**
+ * @brief Whether the public flag was ignored during the build.
+ */
+ bool publicIgnored;
+
+ /**
+ * @brief Whether the next build will be the first build ever.
+ */
+ bool firstBuild;
+
+ /*
+ * @brief If true, sending from indices of the processor to other
+ * indices on the same processor is enabled even if the same indexset is used
+ * on both the sending and receiving side.
+ */
+ bool includeSelf;
+
+ /** @brief The index pair type. */
+ typedef IndexPair<GlobalIndex, LocalIndex>
+ PairType;
+
+ /**
+ * @brief The remote indices.
+ *
+ * The key is the process id and the values are the pair of remote
+ * index lists, the first for receiving, the second for sending.
+ */
+ RemoteIndexMap remoteIndices_;
+
+ /**
+ * @brief Build the remote mapping.
+ *
+ * If the template parameter ignorePublic is true all indices will be treated
+ * as public.
+ * @param includeSelf If true, sending from indices of the processor to other
+ * indices on the same processor is enabled even if the same indexset is used
+ * on both the sending and receiving side.
+ */
+ template<bool ignorePublic>
+ inline void buildRemote(bool includeSelf);
+
+ /**
+ * @brief Count the number of public indices in an index set.
+ * @param indexSet The index set whose indices we count.
+ * @return the number of indices marked as public.
+ */
+ inline int noPublic(const ParallelIndexSet& indexSet);
+
+ /**
+ * @brief Pack the indices to send if source_ and target_ are the same.
+ *
+ * If the template parameter ignorePublic is true all indices will be treated
+ * as public.
+ * @param myPairs Array to store references to the public indices in.
+ * @param p_out The output buffer to pack the entries to.
+ * @param type The mpi datatype for the pairs.
+ * @param bufferSize The size of the output buffer p_out.
+ * @param position The position to start packing.
+ */
+ template<bool ignorePublic>
+ inline void packEntries(PairType** myPairs, const ParallelIndexSet& indexSet,
+ char* p_out, MPI_Datatype type, int bufferSize,
+ int* position, int n);
+
+ /**
+ * @brief unpacks the received indices and builds the remote index list.
+ *
+ * @param remote The list to add the indices to.
+ * @param remoteEntries The number of remote entries to unpack.
+ * @param local The local indices to check whether we know the remote
+ * indices.
+ * @param localEntries The number of local indices.
+ * @param type The mpi data type for unpacking.
+ * @param p_in The input buffer to unpack from.
+ * @param position The position in the buffer to start unpacking from.
+ * @param bufferSize The size of the input buffer.
+ */
+ inline void unpackIndices(RemoteIndexList& remote, int remoteEntries,
+ PairType** local, int localEntries, char* p_in,
+ MPI_Datatype type, int* position, int bufferSize,
+ bool fromOurself);
+
+ inline void unpackIndices(RemoteIndexList& send, RemoteIndexList& receive,
+ int remoteEntries, PairType** localSource,
+ int localSourceEntries, PairType** localDest,
+ int localDestEntries, char* p_in,
+ MPI_Datatype type, int* position, int bufferSize);
+
+ void unpackCreateRemote(char* p_in, PairType** sourcePairs, PairType** destPairs,
+ int remoteProc, int sourcePublish, int destPublish,
+ int bufferSize, bool sendTwo, bool fromOurSelf=false);
+ };
+
+ /** @} */
+
+ /**
+ * @brief Modifier for adding and/or deleting remote indices from
+ * the remote index list.
+ *
+ * In some cases all the information about the indices that are also present
+ * on remote processes might already be known. In this case this
+ * information can be provided to the RemoteIndices via this modifier.
+ * This avoids the global communication needed by a call to
+ * RemoteIndices::rebuild.
+ *
+ * In some cases it might be advisable to run IndicesSyncer::sync afterwards.
+ *
+ * @warning Use with care. If the indices are not consistent afterwards
+ * communication attempts might deadlock!
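+ *
+ * A minimal usage sketch; `remoteIndices`, `otherRank`, `attribute` and the
+ * local index pair `pair` are assumptions made purely for illustration:
+ * \code
+ * // obtain a modifier for the send list of process otherRank
+ * // (mode=false: the underlying index set will not be resized)
+ * auto modifier = remoteIndices.template getModifier<false,true>(otherRank);
+ * // insert a remote index referring to one of our local index pairs;
+ * // insertions have to happen with ascending global index
+ * modifier.insert(Dune::RemoteIndex<GlobalIndex,Attribute>(attribute, &pair));
+ * \endcode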
+ */
+ template<class T, class A, bool mode>
+ class RemoteIndexListModifier
+ {
+
+ template<typename T1, typename A1>
+ friend class RemoteIndices;
+
+ public:
+ class InvalidPosition : public RangeError
+ {};
+
+ enum {
+ /**
+ * @brief If true, the index set corresponding to the
+ * remote indices might get modified.
+ *
+ * If, for example, new indices are added to an index set,
+ * all pointers of the remote indices to the local indices
+ * become invalid after ParallelIndexSet::endResize() has been called.
+ */
+ MODIFYINDEXSET=mode
+ };
+
+ /**
+ * @brief Type of the index set we use.
+ */
+ typedef T ParallelIndexSet;
+
+ /**
+ * @brief The type of the global index.
+ */
+ typedef typename ParallelIndexSet::GlobalIndex GlobalIndex;
+
+ /**
+ * @brief The type of the local index.
+ */
+ typedef typename ParallelIndexSet::LocalIndex LocalIndex;
+
+ /**
+ * @brief The type of the attribute.
+ */
+ typedef typename LocalIndex::Attribute Attribute;
+
+ /**
+ * @brief Type of the remote indices we manage.
+ */
+ typedef Dune::RemoteIndex<GlobalIndex,Attribute> RemoteIndex;
+
+ /**
+ * @brief The type of the allocator for the remote index list.
+ */
+ typedef A Allocator;
+
+ /** @brief The type of the remote index list. */
+ typedef Dune::SLList<RemoteIndex,Allocator>
+ RemoteIndexList;
+
+ /**
+ * @brief The type of the modifying iterator of the remote index list.
+ */
+ typedef SLListModifyIterator<RemoteIndex,Allocator> ModifyIterator;
+
+ /**
+ * @brief The type of the remote index list iterator.
+ */
+ typedef typename RemoteIndexList::const_iterator ConstIterator;
+
+ /**
+ * @brief Insert an index to the list.
+ *
+ * Moves to the position where the index fits and inserts it.
+ * After the insertion only indices with a bigger global index
+ * than the inserted one can be inserted.
+ *
+ * This method is only available if MODIFYINDEXSET is false.
+ *
+ * @param index The index to insert.
+ * @exception InvalidPosition Thrown if the index at the current position or
+ * the one before has a bigger global index than the one to be inserted.
+ */
+ void insert(const RemoteIndex& index);
+
+
+ /**
+ * @brief Insert an index to the list.
+ *
+ * Moves to the position where the index fits and inserts it.
+ * After the insertion only indices with a bigger global index
+ * than the inserted one can be inserted.
+ *
+ * This method is only available if MODIFYINDEXSET is true.
+ *
+ * @param index The index to insert.
+ * @param global The global index of the remote index.
+ * @exception InvalidPosition Thrown if the index at the current position or
+ * the one before has a bigger global index than the one to be inserted.
+ */
+ void insert(const RemoteIndex& index, const GlobalIndex& global);
+
+ /**
+ * @brief Remove a remote index.
+ * @param global The global index corresponding to the remote index.
+ * @return True if there was a corresponding remote index.
+ * @exception InvalidPosition If there was an insertion or deletion of
+ * a remote index corresponding to a bigger global index before.
+ */
+ bool remove(const GlobalIndex& global);
+
+ /**
+ * @brief Repair the pointers to the local index pairs.
+ *
+ * Due to adding and/or deleting indices in the
+ * index set, all pointers to the local index pairs might become
+ * invalid during ParallelIndexSet::endResize().
+ * This method repairs them.
+ *
+ * @exception InvalidIndexSetState Thrown if the underlying
+ * index set is not in ParallelIndexSetState::GROUND mode (only when
+ * compiled with DUNE_ISTL_WITH_CHECKING!).
+ */
+ void repairLocalIndexPointers();
+
+
+ RemoteIndexListModifier(const RemoteIndexListModifier&);
+
+ /**
+ * @brief Default constructor.
+ * @warning Object is not usable!
+ */
+ RemoteIndexListModifier()
+ : glist_()
+ {}
+
+ private:
+
+ /**
+ * @brief Create a modifier for a remote index list.
+ * @param indexSet The set of indices the process knows.
+ * @param rList The list of remote indices to modify.
+ */
+ RemoteIndexListModifier(const ParallelIndexSet& indexSet,
+ RemoteIndexList& rList);
+
+ typedef SLList<GlobalIndex,Allocator> GlobalList;
+ typedef typename GlobalList::ModifyIterator GlobalModifyIterator;
+ RemoteIndexList* rList_;
+ const ParallelIndexSet* indexSet_;
+ GlobalList glist_;
+ ModifyIterator iter_;
+ GlobalModifyIterator giter_;
+ ConstIterator end_;
+ bool first_;
+ GlobalIndex last_;
+ };
+
+ /**
+ * @brief A collective iterator for moving over the remote indices for
+ * all processes collectively.
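+ *
+ * A minimal usage sketch; `remoteIndices` and the global index `g` are
+ * assumptions made purely for illustration:
+ * \code
+ * // iterate over the send lists of all known processes
+ * auto collIt = remoteIndices.template iterator<true>();
+ * collIt.advance(g); // move every per-process iterator to a global index >= g
+ * for(auto it = collIt.begin(); it != collIt.end(); ++it)
+ *   std::cout << "global index " << g << " is also known on rank "
+ *             << it.process() << std::endl;
+ * \endcode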
+ */
+ template<class T, class A>
+ class CollectiveIterator
+ {
+
+ /**
+ * @brief Type of the index set we use.
+ */
+ typedef T ParallelIndexSet;
+
+ /**
+ * @brief The type of the global index.
+ */
+ typedef typename ParallelIndexSet::GlobalIndex GlobalIndex;
+
+ /**
+ * @brief The type of the local index.
+ */
+ typedef typename ParallelIndexSet::LocalIndex LocalIndex;
+
+ /**
+ * @brief The type of the attribute.
+ */
+ typedef typename LocalIndex::Attribute Attribute;
+
+ /** @brief The remote index type */
+ typedef Dune::RemoteIndex<GlobalIndex,Attribute> RemoteIndex;
+
+ /** @brief The allocator of the remote indices. */
+ using Allocator = typename std::allocator_traits<A>::template rebind_alloc<RemoteIndex>;
+
+ /** @brief The type of the remote index list. */
+ typedef Dune::SLList<RemoteIndex,Allocator> RemoteIndexList;
+
+ /** @brief The type of map used for storing the iterators. */
+ typedef std::map<int,std::pair<typename RemoteIndexList::const_iterator,
+ const typename RemoteIndexList::const_iterator> >
+ Map;
+
+ public:
+
+ /** @brief The type of the map from rank to remote index list. */
+ typedef std::map<int, std::pair<RemoteIndexList*,RemoteIndexList*> >
+ RemoteIndexMap;
+
+ /**
+ * @brief Constructor.
+ * @param map_ The map of the remote indices.
+ * @param send True if we want to iterate over the remote indices used for sending.
+ */
+ inline CollectiveIterator(const RemoteIndexMap& map_, bool send);
+
+ /**
+ * @brief Advances all underlying iterators.
+ *
+ * All iterators are advanced until they point to a remote index whose
+ * global id is bigger or equal to global.
+ * Iterators pointing to their end are removed.
+ * @param global The index we search for.
+ */
+ inline void advance(const GlobalIndex& global);
+
+ /**
+ * @brief Advances all underlying iterators.
+ *
+ * All iterators are advanced until they point to a remote index whose
+ * global id is bigger or equal to global.
+ * Iterators pointing to their end are removed.
+ * @param global The index we search for.
+ * @param attribute The attribute we search for.
+ */
+ inline void advance(const GlobalIndex& global, const Attribute& attribute);
+
+ CollectiveIterator& operator++();
+
+ /**
+ * @brief Checks whether there are still iterators in the map.
+ */
+ inline bool empty() const;
+
+ /**
+ * @brief Iterator over the valid underlying iterators.
+ *
+ * An iterator is valid if it points to a remote index whose
+ * global id is equal to the one currently examined.
+ */
+ class iterator
+ {
+ public:
+ typedef typename Map::iterator RealIterator;
+ typedef typename Map::iterator ConstRealIterator;
+
+
+ //! \todo Please doc me!
+ iterator(const RealIterator& iter, const ConstRealIterator& end, GlobalIndex& index)
+ : iter_(iter), end_(end), index_(index), hasAttribute(false)
+ {
+ // Move to the first valid entry
+ while(iter_!=end_ && iter_->second.first->localIndexPair().global()!=index_)
+ ++iter_;
+ }
+
+ iterator(const RealIterator& iter, const ConstRealIterator& end, GlobalIndex index,
+ Attribute attribute)
+ : iter_(iter), end_(end), index_(index), attribute_(attribute), hasAttribute(true)
+ {
+ // Move to the first valid entry or the end
+ while(iter_!=end_ && (iter_->second.first->localIndexPair().global()!=index_
+ || iter_->second.first->localIndexPair().local().attribute()!=attribute))
+ ++iter_;
+ }
+ //! \todo Please doc me!
+ iterator(const iterator& other)
+ : iter_(other.iter_), end_(other.end_), index_(other.index_),
+ attribute_(other.attribute_), hasAttribute(other.hasAttribute)
+ { }
+
+ //! \todo Please doc me!
+ iterator& operator++()
+ {
+ ++iter_;
+ // If entry is not valid move on
+ while(iter_!=end_ && (iter_->second.first->localIndexPair().global()!=index_ ||
+ (hasAttribute &&
+ iter_->second.first->localIndexPair().local().attribute()!=attribute_)))
+ ++iter_;
+ assert(iter_==end_ ||
+ (iter_->second.first->localIndexPair().global()==index_));
+ assert(iter_==end_ || !hasAttribute ||
+ (iter_->second.first->localIndexPair().local().attribute()==attribute_));
+ return *this;
+ }
+
+ //! \todo Please doc me!
+ const RemoteIndex& operator*() const
+ {
+ return *(iter_->second.first);
+ }
+
+ //! \todo Please doc me!
+ int process() const
+ {
+ return iter_->first;
+ }
+
+ //! \todo Please doc me!
+ const RemoteIndex* operator->() const
+ {
+ return iter_->second.first.operator->();
+ }
+
+ //! \todo Please doc me!
+ bool operator==(const iterator& other) const
+ {
+ return other.iter_==iter_;
+ }
+
+ //! \todo Please doc me!
+ bool operator!=(const iterator& other) const
+ {
+ return other.iter_!=iter_;
+ }
+
+ private:
+ iterator();
+
+ RealIterator iter_;
+ RealIterator end_;
+ GlobalIndex index_;
+ Attribute attribute_;
+ bool hasAttribute;
+ };
+
+ iterator begin();
+
+ iterator end();
+
+ private:
+
+ Map map_;
+ GlobalIndex index_;
+ Attribute attribute_;
+ bool noattribute;
+ };
+
+ template<typename TG, typename TA>
+ MPI_Datatype MPITraits<IndexPair<TG,ParallelLocalIndex<TA> > >::getType()
+ {
+ if(type==MPI_DATATYPE_NULL) {
+ int length[2] = {1, 1};
+ MPI_Aint base;
+ MPI_Aint disp[2];
+ MPI_Datatype types[2] = {MPITraits<TG>::getType(),
+ MPITraits<ParallelLocalIndex<TA> >::getType()};
+ IndexPair<TG,ParallelLocalIndex<TA> > rep;
+ MPI_Get_address(&rep, &base); // lower bound of the datatype
+ MPI_Get_address(&(rep.global_), &disp[0]);
+ MPI_Get_address(&(rep.local_), &disp[1]);
+ for (MPI_Aint& d : disp)
+ d -= base;
+
+ MPI_Datatype tmp;
+ MPI_Type_create_struct(2, length, disp, types, &tmp);
+
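+ // Resize the struct type so its extent equals sizeof(IndexPair); this
+ // accounts for any trailing padding when arrays of pairs are packed.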
+ MPI_Type_create_resized(tmp, 0, sizeof(IndexPair<TG,ParallelLocalIndex<TA> >), &type);
+ MPI_Type_commit(&type);
+
+ MPI_Type_free(&tmp);
+ }
+ return type;
+ }
+
+ template<typename TG, typename TA>
+ MPI_Datatype MPITraits<IndexPair<TG,ParallelLocalIndex<TA> > >::type=MPI_DATATYPE_NULL;
+
+ template<typename T1, typename T2>
+ RemoteIndex<T1,T2>::RemoteIndex(const T2& attribute, const PairType* local)
+ : localIndex_(local), attribute_(static_cast<std::underlying_type_t<T2>>(attribute))
+ {}
+
+ template<typename T1, typename T2>
+ RemoteIndex<T1,T2>::RemoteIndex(const T2& attribute)
+ : localIndex_(0), attribute_(static_cast<std::underlying_type_t<T2>>(attribute))
+ {}
+
+ template<typename T1, typename T2>
+ RemoteIndex<T1,T2>::RemoteIndex()
+ : localIndex_(0), attribute_()
+ {}
+ template<typename T1, typename T2>
+ inline bool RemoteIndex<T1,T2>::operator==(const RemoteIndex& ri) const
+ {
+ return localIndex_==ri.localIndex_ && attribute_==ri.attribute_;
+ }
+
+ template<typename T1, typename T2>
+ inline bool RemoteIndex<T1,T2>::operator!=(const RemoteIndex& ri) const
+ {
+ return localIndex_!=ri.localIndex_ || attribute_!=ri.attribute_;
+ }
+
+ template<typename T1, typename T2>
+ inline const T2 RemoteIndex<T1,T2>::attribute() const
+ {
+ return T2(attribute_);
+ }
+
+ template<typename T1, typename T2>
+ inline const IndexPair<T1,ParallelLocalIndex<T2> >& RemoteIndex<T1,T2>::localIndexPair() const
+ {
+ return *localIndex_;
+ }
+
+ template<typename T, typename A>
+ inline RemoteIndices<T,A>::RemoteIndices(const ParallelIndexSet& source,
+ const ParallelIndexSet& destination,
+ const MPI_Comm& comm,
+ const std::vector<int>& neighbours,
+ bool includeSelf_)
+ : source_(&source), target_(&destination), comm_(comm),
+ sourceSeqNo_(-1), destSeqNo_(-1), publicIgnored(false), firstBuild(true),
+ includeSelf(includeSelf_)
+ {
+ setNeighbours(neighbours);
+ }
+
+ template<typename T, typename A>
+ void RemoteIndices<T,A>::setIncludeSelf(bool b)
+ {
+ includeSelf=b;
+ }
+
+ template<typename T, typename A>
+ RemoteIndices<T,A>::RemoteIndices()
+ : source_(0), target_(0), sourceSeqNo_(-1),
+ destSeqNo_(-1), publicIgnored(false), firstBuild(true),
+ includeSelf(false)
+ {}
+
+ template<class T, typename A>
+ void RemoteIndices<T,A>::setIndexSets(const ParallelIndexSet& source,
+ const ParallelIndexSet& destination,
+ const MPI_Comm& comm,
+ const std::vector<int>& neighbours)
+ {
+ free();
+ source_ = &source;
+ target_ = &destination;
+ comm_ = comm;
+ firstBuild = true;
+ setNeighbours(neighbours);
+ }
+
+ template<typename T, typename A>
+ const typename RemoteIndices<T,A>::ParallelIndexSet&
+ RemoteIndices<T,A>::sourceIndexSet() const
+ {
+ return *source_;
+ }
+
+
+ template<typename T, typename A>
+ const typename RemoteIndices<T,A>::ParallelIndexSet&
+ RemoteIndices<T,A>::destinationIndexSet() const
+ {
+ return *target_;
+ }
+
+
+ template<typename T, typename A>
+ RemoteIndices<T,A>::~RemoteIndices()
+ {
+ free();
+ }
+
+ template<typename T, typename A>
+ template<bool ignorePublic>
+ inline void RemoteIndices<T,A>::packEntries(IndexPair<GlobalIndex,LocalIndex>** pairs,
+ const ParallelIndexSet& indexSet,
+ char* p_out, MPI_Datatype type,
+ int bufferSize,
+ int *position,
+ [[maybe_unused]] int n)
+ {
+ // fill with own indices
+ const auto end = indexSet.end();
+
+ //Now pack the source indices
+ int i=0;
+ for(auto index = indexSet.begin(); index != end; ++index)
+ if(ignorePublic || index->local().isPublic()) {
+
+ MPI_Pack(const_cast<PairType*>(&(*index)), 1,
+ type,
+ p_out, bufferSize, position, comm_);
+ pairs[i++] = const_cast<PairType*>(&(*index));
+
+ }
+ assert(i==n);
+ }
+
+ template<typename T, typename A>
+ inline int RemoteIndices<T,A>::noPublic(const ParallelIndexSet& indexSet)
+ {
+
+ int noPublic=0;
+
+ const auto end=indexSet.end();
+ for(auto index=indexSet.begin(); index!=end; ++index)
+ if(index->local().isPublic())
+ noPublic++;
+
+ return noPublic;
+
+ }
+
+
+ template<typename T, typename A>
+ inline void RemoteIndices<T,A>::unpackCreateRemote(char* p_in, PairType** sourcePairs,
+ PairType** destPairs, int remoteProc,
+ int sourcePublish, int destPublish,
+ int bufferSize, bool sendTwo,
+ bool fromOurSelf)
+ {
+
+ // unpack the number of indices we received
+ int noRemoteSource=-1, noRemoteDest=-1;
+ char twoIndexSets=0;
+ int position=0;
+ // Did we receive two index sets?
+ MPI_Unpack(p_in, bufferSize, &position, &twoIndexSets, 1, MPI_CHAR, comm_);
+ // The number of source indices received
+ MPI_Unpack(p_in, bufferSize, &position, &noRemoteSource, 1, MPI_INT, comm_);
+ // The number of destination indices received
+ MPI_Unpack(p_in, bufferSize, &position, &noRemoteDest, 1, MPI_INT, comm_);
+
+
+ // Indices for which we receive
+ RemoteIndexList* receive= new RemoteIndexList();
+ // Indices for which we send
+ RemoteIndexList* send=0;
+
+ MPI_Datatype type= MPITraits<PairType>::getType();
+
+ if(!twoIndexSets) {
+ if(sendTwo) {
+ send = new RemoteIndexList();
+ // Create both remote index sets simultaneously
+ unpackIndices(*send, *receive, noRemoteSource, sourcePairs, sourcePublish,
+ destPairs, destPublish, p_in, type, &position, bufferSize);
+ }else{
+ // we only need one list
+ unpackIndices(*receive, noRemoteSource, sourcePairs, sourcePublish,
+ p_in, type, &position, bufferSize, fromOurSelf);
+ send=receive;
+ }
+ }else{
+
+ int oldPos=position;
+ // Two index sets received
+ unpackIndices(*receive, noRemoteSource, destPairs, destPublish,
+ p_in, type, &position, bufferSize, fromOurSelf);
+ if(!sendTwo)
+ //unpack source entries again as destination entries
+ position=oldPos;
+
+ send = new RemoteIndexList();
+ unpackIndices(*send, noRemoteDest, sourcePairs, sourcePublish,
+ p_in, type, &position, bufferSize, fromOurSelf);
+ }
+
+ if(receive->empty() && send->empty()) {
+ if(send==receive) {
+ delete send;
+ }else{
+ delete send;
+ delete receive;
+ }
+ }else{
+ remoteIndices_.insert(std::make_pair(remoteProc,
+ std::make_pair(send,receive)));
+ }
+ }
+
+
+ template<typename T, typename A>
+ template<bool ignorePublic>
+ inline void RemoteIndices<T,A>::buildRemote(bool includeSelf_)
+ {
+ // Processor configuration
+ int rank, procs;
+ MPI_Comm_rank(comm_, &rank);
+ MPI_Comm_size(comm_, &procs);
+
+ // number of local indices to publish
+ // The indices of the destination will be sent.
+ int sourcePublish, destPublish;
+
+ // Do we need to send two index sets?
+ char sendTwo = (source_ != target_);
+
+ if(procs==1 && !(sendTwo || includeSelf_))
+ // Nothing to communicate
+ return;
+
+ sourcePublish = (ignorePublic) ? source_->size() : noPublic(*source_);
+
+ if(sendTwo)
+ destPublish = (ignorePublic) ? target_->size() : noPublic(*target_);
+ else
+ // we only need to send one set of indices
+ destPublish = 0;
+
+ int maxPublish, publish=sourcePublish+destPublish;
+
+ // Calculate the maximum number of indices sent
+ MPI_Allreduce(&publish, &maxPublish, 1, MPI_INT, MPI_MAX, comm_);
+
+ // allocate buffers
+ PairType** destPairs;
+ PairType** sourcePairs = new PairType*[sourcePublish>0 ? sourcePublish : 1];
+
+ if(sendTwo)
+ destPairs = new PairType*[destPublish>0 ? destPublish : 1];
+ else
+ destPairs=sourcePairs;
+
+ char** buffer = new char*[2];
+ int bufferSize;
+ int position=0;
+ int intSize;
+ int charSize;
+
+ // calculate buffer size
+ MPI_Datatype type = MPITraits<PairType>::getType();
+
+ MPI_Pack_size(maxPublish, type, comm_,
+ &bufferSize);
+ MPI_Pack_size(1, MPI_INT, comm_,
+ &intSize);
+ MPI_Pack_size(1, MPI_CHAR, comm_,
+ &charSize);
+ // Our message will contain the following:
+ // a flag indicating whether two index sets were sent
+ // the size of the source and the dest indexset,
+ // then the source and destination indices
+ bufferSize += 2 * intSize + charSize;
+
+ if(bufferSize<=0) bufferSize=1;
+
+ buffer[0] = new char[bufferSize];
+ buffer[1] = new char[bufferSize];
+
+
+ // pack entries into buffer[0], p_out below!
+ MPI_Pack(&sendTwo, 1, MPI_CHAR, buffer[0], bufferSize, &position,
+ comm_);
+
+ // The number of indices we send for each index set
+ MPI_Pack(&sourcePublish, 1, MPI_INT, buffer[0], bufferSize, &position,
+ comm_);
+ MPI_Pack(&destPublish, 1, MPI_INT, buffer[0], bufferSize, &position,
+ comm_);
+
+ // Now pack the source indices and setup the destination pairs
+ packEntries<ignorePublic>(sourcePairs, *source_, buffer[0], type,
+ bufferSize, &position, sourcePublish);
+ // If necessary send the dest indices and setup the source pairs
+ if(sendTwo)
+ packEntries<ignorePublic>(destPairs, *target_, buffer[0], type,
+ bufferSize, &position, destPublish);
+
+
+ // Update remote indices for ourself
+ if(sendTwo|| includeSelf_)
+ unpackCreateRemote(buffer[0], sourcePairs, destPairs, rank, sourcePublish,
+ destPublish, bufferSize, sendTwo, includeSelf_);
+
+ neighbourIds.erase(rank);
+
+ if(neighbourIds.size()==0)
+ {
+ Dune::dvverb<<rank<<": Sending messages in a ring"<<std::endl;
+ // send messages in ring
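+ // Each step forwards the buffer received in the previous step, so after
+ // step proc the incoming buffer holds the indices published by rank
+ // (rank+procs-proc)%procs (see remoteProc below).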
+ for(int proc=1; proc<procs; proc++) {
+ // pointers to the current input and output buffers
+ char* p_out = buffer[1-(proc%2)];
+ char* p_in = buffer[proc%2];
+
+ MPI_Status status;
+ if(rank%2==0) {
+ MPI_Ssend(p_out, bufferSize, MPI_PACKED, (rank+1)%procs,
+ commTag_, comm_);
+ MPI_Recv(p_in, bufferSize, MPI_PACKED, (rank+procs-1)%procs,
+ commTag_, comm_, &status);
+ }else{
+ MPI_Recv(p_in, bufferSize, MPI_PACKED, (rank+procs-1)%procs,
+ commTag_, comm_, &status);
+ MPI_Ssend(p_out, bufferSize, MPI_PACKED, (rank+1)%procs,
+ commTag_, comm_);
+ }
+
+
+ // The process these indices are from
+ int remoteProc = (rank+procs-proc)%procs;
+
+ unpackCreateRemote(p_in, sourcePairs, destPairs, remoteProc, sourcePublish,
+ destPublish, bufferSize, sendTwo);
+
+ }
+
+ }
+ else
+ {
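+ // Neighbours are known, so a global ring exchange is unnecessary:
+ // post a non-blocking send to every neighbour and then receive
+ // exactly one message from each of them via probe/recv.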
+ MPI_Request* requests=new MPI_Request[neighbourIds.size()];
+ MPI_Request* req=requests;
+
+ typedef typename std::set<int>::size_type size_type;
+ size_type noNeighbours=neighbourIds.size();
+
+ // setup sends
+ for(std::set<int>::iterator neighbour=neighbourIds.begin();
+ neighbour!= neighbourIds.end(); ++neighbour) {
+ // Only send the information to the neighbouring processors
+ MPI_Issend(buffer[0], position , MPI_PACKED, *neighbour, commTag_, comm_, req++);
+ }
+
+ //Test for received messages
+
+ for(size_type received=0; received <noNeighbours; ++received)
+ {
+ MPI_Status status;
+ // probe for next message
+ MPI_Probe(MPI_ANY_SOURCE, commTag_, comm_, &status);
+ int remoteProc=status.MPI_SOURCE;
+ int size;
+ MPI_Get_count(&status, MPI_PACKED, &size);
+ // receive message
+ MPI_Recv(buffer[1], size, MPI_PACKED, remoteProc,
+ commTag_, comm_, &status);
+
+ unpackCreateRemote(buffer[1], sourcePairs, destPairs, remoteProc, sourcePublish,
+ destPublish, bufferSize, sendTwo);
+ }
+ // wait for completion of pending requests
+ MPI_Status* statuses = new MPI_Status[neighbourIds.size()];
+
+ if(MPI_ERR_IN_STATUS==MPI_Waitall(neighbourIds.size(), requests, statuses)) {
+ for(size_type i=0; i < neighbourIds.size(); ++i)
+ if(statuses[i].MPI_ERROR!=MPI_SUCCESS) {
+ std::cerr<<rank<<": MPI_Error occurred while receiving message."<<std::endl;
+ MPI_Abort(comm_, 999);
+ }
+ }
+ delete[] requests;
+ delete[] statuses;
+ }
+
+
+ // delete allocated memory
+ if(destPairs!=sourcePairs)
+ delete[] destPairs;
+
+ delete[] sourcePairs;
+ delete[] buffer[0];
+ delete[] buffer[1];
+ delete[] buffer;
+ }
+
+ template<typename T, typename A>
+ inline void RemoteIndices<T,A>::unpackIndices(RemoteIndexList& remote,
+ int remoteEntries,
+ PairType** local,
+ int localEntries,
+ char* p_in,
+ MPI_Datatype type,
+ int* position,
+ int bufferSize,
+ bool fromOurSelf)
+ {
+ if(remoteEntries==0)
+ return;
+
+ PairType index;
+ MPI_Unpack(p_in, bufferSize, position, &index, 1,
+ type, comm_);
+ GlobalIndex oldGlobal=index.global();
+ int n_in=0, localIndex=0;
+
+ //Check if we know the global index
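+ // Both the received indices and the local pairs are ordered by global
+ // index, so a single merge-like sweep with two cursors suffices.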
+ while(localIndex<localEntries) {
+ if(local[localIndex]->global()==index.global()) {
+ int oldLocalIndex=localIndex;
+
+ while(localIndex<localEntries &&
+ local[localIndex]->global()==index.global()) {
+ if(!fromOurSelf || index.local().attribute() !=
+ local[localIndex]->local().attribute())
+ // if index is from us it has to have a different attribute
+ remote.push_back(RemoteIndex(index.local().attribute(),
+ local[localIndex]));
+ localIndex++;
+ }
+
+ // unpack next remote index
+ if((++n_in) < remoteEntries) {
+ MPI_Unpack(p_in, bufferSize, position, &index, 1,
+ type, comm_);
+ if(index.global()==oldGlobal)
+ // Restart comparison for the same global indices
+ localIndex=oldLocalIndex;
+ else
+ oldGlobal=index.global();
+ }else{
+ // No more received indices
+ break;
+ }
+ continue;
+ }
+
+ if (local[localIndex]->global()<index.global()) {
+ // compare with next entry in our list
+ ++localIndex;
+ }else{
+ // We do not know the index, unpack next
+ if((++n_in) < remoteEntries) {
+ MPI_Unpack(p_in, bufferSize, position, &index, 1,
+ type, comm_);
+ oldGlobal=index.global();
+ }else
+ // No more received indices
+ break;
+ }
+ }
+
+ // Unpack the other received indices without doing anything
+ while(++n_in < remoteEntries)
+ MPI_Unpack(p_in, bufferSize, position, &index, 1,
+ type, comm_);
+ }
+
+
+ template<typename T, typename A>
+ inline void RemoteIndices<T,A>::unpackIndices(RemoteIndexList& send,
+ RemoteIndexList& receive,
+ int remoteEntries,
+ PairType** localSource,
+ int localSourceEntries,
+ PairType** localDest,
+ int localDestEntries,
+ char* p_in,
+ MPI_Datatype type,
+ int* position,
+ int bufferSize)
+ {
+ int n_in=0, sourceIndex=0, destIndex=0;
+
+ //Check if we know the global index
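+ // As above, all three sequences are ordered by global index; advance the
+ // cursors into localSource and localDest up to each unpacked global index
+ // and record a remote index where the globals match.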
+ while(n_in<remoteEntries && (sourceIndex<localSourceEntries || destIndex<localDestEntries)) {
+ // Unpack next index
+ PairType index;
+ MPI_Unpack(p_in, bufferSize, position, &index, 1,
+ type, comm_);
+ n_in++;
+
+ // Advance until the global indices in localSource and localDest are >= the one in the unpacked index
+ while(sourceIndex<localSourceEntries && localSource[sourceIndex]->global()<index.global())
+ sourceIndex++;
+
+ while(destIndex<localDestEntries && localDest[destIndex]->global()<index.global())
+ destIndex++;
+
+ // Add a remote index if we found the global index.
+ if(sourceIndex<localSourceEntries && localSource[sourceIndex]->global()==index.global())
+ send.push_back(RemoteIndex(index.local().attribute(),
+ localSource[sourceIndex]));
+
+ if(destIndex < localDestEntries && localDest[destIndex]->global() == index.global())
+ receive.push_back(RemoteIndex(index.local().attribute(),
+ localDest[destIndex]));
+ }
+
+ }
+
+ template<typename T, typename A>
+ inline void RemoteIndices<T,A>::free()
+ {
+ auto lend = remoteIndices_.end();
+ for(auto lists=remoteIndices_.begin(); lists != lend; ++lists) {
+ if(lists->second.first==lists->second.second) {
+ // there is only one remote index list.
+ delete lists->second.first;
+ }else{
+ delete lists->second.first;
+ delete lists->second.second;
+ }
+ }
+ remoteIndices_.clear();
+ firstBuild=true;
+ }
+
+ template<typename T, typename A>
+ inline int RemoteIndices<T,A>::neighbours() const
+ {
+ return remoteIndices_.size();
+ }
+
+ template<typename T, typename A>
+ template<bool ignorePublic>
+ inline void RemoteIndices<T,A>::rebuild()
+ {
+ // Test whether a rebuild is needed.
+ if(firstBuild ||
+ ignorePublic!=publicIgnored || !isSynced()) {
+ free();
+
+ buildRemote<ignorePublic>(includeSelf);
+
+ sourceSeqNo_ = source_->seqNo();
+ destSeqNo_ = target_->seqNo();
+ firstBuild=false;
+ publicIgnored=ignorePublic;
+ }
+
+
+ }
+
+ template<typename T, typename A>
+ inline bool RemoteIndices<T,A>::isSynced() const
+ {
+ return sourceSeqNo_==source_->seqNo() && destSeqNo_ ==target_->seqNo();
+ }
+
+ template<typename T, typename A>
+ template<bool mode, bool send>
+ RemoteIndexListModifier<T,A,mode> RemoteIndices<T,A>::getModifier(int process)
+ {
+
+ // The users are on their own now!
+ // We assume they know what they are doing and just set the
+ // remote indices to synced status.
+ sourceSeqNo_ = source_->seqNo();
+ destSeqNo_ = target_->seqNo();
+
+ typename RemoteIndexMap::iterator found = remoteIndices_.find(process);
+
+ if(found == remoteIndices_.end())
+ {
+ if(source_ != target_)
+ found = remoteIndices_.insert(found, std::make_pair(process,
+ std::make_pair(new RemoteIndexList(),
+ new RemoteIndexList())));
+ else{
+ RemoteIndexList* rlist = new RemoteIndexList();
+ found = remoteIndices_.insert(found,
+ std::make_pair(process,
+ std::make_pair(rlist, rlist)));
+ }
+ }
+
+ firstBuild = false;
+
+ if(send)
+ return RemoteIndexListModifier<T,A,mode>(*source_, *(found->second.first));
+ else
+ return RemoteIndexListModifier<T,A,mode>(*target_, *(found->second.second));
+ }
+
+ template<typename T, typename A>
+ inline typename RemoteIndices<T,A>::const_iterator
+ RemoteIndices<T,A>::find(int proc) const
+ {
+ return remoteIndices_.find(proc);
+ }
+
+ template<typename T, typename A>
+ inline typename RemoteIndices<T,A>::const_iterator
+ RemoteIndices<T,A>::begin() const
+ {
+ return remoteIndices_.begin();
+ }
+
+ template<typename T, typename A>
+ inline typename RemoteIndices<T,A>::const_iterator
+ RemoteIndices<T,A>::end() const
+ {
+ return remoteIndices_.end();
+ }
+
+
+ template<typename T, typename A>
+ bool RemoteIndices<T,A>::operator==(const RemoteIndices& ri) const
+ {
+ if(neighbours()!=ri.neighbours())
+ return false;
+
+ const auto rend = remoteIndices_.end();
+
+ for(auto rindex = remoteIndices_.begin(), rindex1=ri.remoteIndices_.begin(); rindex!=rend; ++rindex, ++rindex1) {
+ if(rindex->first != rindex1->first)
+ return false;
+ if(*(rindex->second.first) != *(rindex1->second.first))
+ return false;
+ if(*(rindex->second.second) != *(rindex1->second.second))
+ return false;
+ }
+ return true;
+ }
+
+ template<class T, class A, bool mode>
+ RemoteIndexListModifier<T,A,mode>::RemoteIndexListModifier(const ParallelIndexSet& indexSet,
+ RemoteIndexList& rList)
+ : rList_(&rList), indexSet_(&indexSet), iter_(rList.beginModify()), end_(rList.end()), first_(true)
+ {
+ if(MODIFYINDEXSET) {
+ assert(indexSet_);
+ for(ConstIterator iter=iter_; iter != end_; ++iter)
+ glist_.push_back(iter->localIndexPair().global());
+ giter_ = glist_.beginModify();
+ }
+ }
+
+ template<typename T, typename A, bool mode>
+ RemoteIndexListModifier<T,A,mode>::RemoteIndexListModifier(const RemoteIndexListModifier<T,A,mode>& other)
+ : rList_(other.rList_), indexSet_(other.indexSet_),
+ glist_(other.glist_), iter_(other.iter_), giter_(other.giter_), end_(other.end_),
+ first_(other.first_), last_(other.last_)
+ {}
+
+ template<typename T, typename A, bool mode>
+ inline void RemoteIndexListModifier<T,A,mode>::repairLocalIndexPointers()
+ {
+ if(MODIFYINDEXSET) {
+ // repair pointers to local index set.
+#ifdef DUNE_ISTL_WITH_CHECKING
+ if(indexSet_->state()!=GROUND)
+ DUNE_THROW(InvalidIndexSetState, "Index has to be in ground mode for repairing pointers to indices");
+#endif
+ auto giter = glist_.begin();
+ auto index = indexSet_->begin();
+
+ for(auto iter=rList_->begin(); iter != end_; ++iter) {
+ while(index->global()<*giter) {
+ ++index;
+#ifdef DUNE_ISTL_WITH_CHECKING
+ if(index == indexSet_->end())
+ DUNE_THROW(InvalidPosition, "No such global index in set!");
+#endif
+ }
+
+#ifdef DUNE_ISTL_WITH_CHECKING
+ if(index->global() != *giter)
+ DUNE_THROW(InvalidPosition, "No such global index in set!");
+#endif
+ iter->localIndex_ = &(*index);
+ }
+ }
+ }
+
+ template<typename T, typename A, bool mode>
+ inline void RemoteIndexListModifier<T,A,mode>::insert(const RemoteIndex& index)
+ {
+ static_assert(!mode,"Not allowed if the mode indicates that new indices "
+ "might be added to the underlying index set. Use "
+ "insert(const RemoteIndex&, const GlobalIndex&) instead");
+
+#ifdef DUNE_ISTL_WITH_CHECKING
+ if(!first_ && index.localIndexPair().global()<last_)
+ DUNE_THROW(InvalidPosition, "Modifcation of remote indices have to occur with ascending global index.");
+#endif
+ // Move to the correct position
+ while(iter_ != end_ && iter_->localIndexPair().global() < index.localIndexPair().global()) {
+ ++iter_;
+ }
+
+ // No duplicate entries allowed
+ assert(iter_==end_ || iter_->localIndexPair().global() != index.localIndexPair().global());
+ iter_.insert(index);
+ last_ = index.localIndexPair().global();
+ first_ = false;
+ }
+
+ template<typename T, typename A, bool mode>
+ inline void RemoteIndexListModifier<T,A,mode>::insert(const RemoteIndex& index, const GlobalIndex& global)
+ {
+ static_assert(mode,"Not allowed if the mode indicates that no new indices "
+ "might be added to the underlying index set. Use "
+ "insert(const RemoteIndex&) instead");
+#ifdef DUNE_ISTL_WITH_CHECKING
+ if(!first_ && global<last_)
+ DUNE_THROW(InvalidPosition, "Modification of remote indices have to occur with ascending global index.");
+#endif
+ // Move to the correct position
+ while(iter_ != end_ && *giter_ < global) {
+ ++giter_;
+ ++iter_;
+ }
+
+ // No duplicate entries allowed
+ assert(iter_->localIndexPair().global() != global);
+ iter_.insert(index);
+ giter_.insert(global);
+
+ last_ = global;
+ first_ = false;
+ }
+
+ template<typename T, typename A, bool mode>
+ bool RemoteIndexListModifier<T,A,mode>::remove(const GlobalIndex& global)
+ {
+#ifdef DUNE_ISTL_WITH_CHECKING
+ if(!first_ && global<last_)
+ DUNE_THROW(InvalidPosition, "Modifcation of remote indices have to occur with ascending global index.");
+#endif
+
+ bool found= false;
+
+ if(MODIFYINDEXSET) {
+ // Move to the correct position
+ while(iter_!=end_ && *giter_< global) {
+ ++giter_;
+ ++iter_;
+ }
+ if(*giter_ == global) {
+ giter_.remove();
+ iter_.remove();
+ found=true;
+ }
+ }else{
+ while(iter_!=end_ && iter_->localIndexPair().global() < global)
+ ++iter_;
+
+ if(iter_->localIndexPair().global()==global) {
+ iter_.remove();
+ found = true;
+ }
+ }
+
+ last_ = global;
+ first_ = false;
+ return found;
+ }
+
+ template<typename T, typename A>
+ template<bool send>
+ inline typename RemoteIndices<T,A>::CollectiveIteratorT RemoteIndices<T,A>::iterator() const
+ {
+ return CollectiveIterator<T,A>(remoteIndices_, send);
+ }
+
+ template<typename T, typename A>
+ inline MPI_Comm RemoteIndices<T,A>::communicator() const
+ {
+ return comm_;
+
+ }
+
+ template<typename T, typename A>
+ CollectiveIterator<T,A>::CollectiveIterator(const RemoteIndexMap& pmap, bool send)
+ {
+
+ const auto end = pmap.end();
+ for(auto process = pmap.begin(); process != end; ++process) {
+ const RemoteIndexList* list = send ? process->second.first : process->second.second;
+ using ri_iterator = typename RemoteIndexList::const_iterator;
+ map_.insert(std::make_pair(process->first,
+ std::pair<ri_iterator, const ri_iterator>(list->begin(), list->end())));
+ }
+ }
+
+ template<typename T, typename A>
+ inline void CollectiveIterator<T,A>::advance(const GlobalIndex& index)
+ {
+ const auto end = map_.end();
+
+ for(auto iter = map_.begin(); iter != end;) {
+ // Step the iterator until we are >= index
+ typename RemoteIndexList::const_iterator current = iter->second.first;
+ typename RemoteIndexList::const_iterator rend = iter->second.second;
+ RemoteIndex remoteIndex;
+ if(current != rend)
+ remoteIndex = *current;
+
+ while(iter->second.first!=iter->second.second && iter->second.first->localIndexPair().global()<index)
+ ++(iter->second.first);
+
+ // erase from the map if there are no more entries.
+ if(iter->second.first == iter->second.second)
+ map_.erase(iter++);
+ else{
+ ++iter;
+ }
+ }
+ index_=index;
+ noattribute=true;
+ }
+
+ template<typename T, typename A>
+ inline void CollectiveIterator<T,A>::advance(const GlobalIndex& index,
+ const Attribute& attribute)
+ {
+ const auto end = map_.end();
+
+ for(auto iter = map_.begin(); iter != end;) {
+ // Step the iterator until we are >= index
+ typename RemoteIndexList::const_iterator current = iter->second.first;
+ typename RemoteIndexList::const_iterator rend = iter->second.second;
+ RemoteIndex remoteIndex;
+ if(current != rend)
+ remoteIndex = *current;
+
+ // Move to global index or bigger
+ while(iter->second.first!=iter->second.second && iter->second.first->localIndexPair().global()<index)
+ ++(iter->second.first);
+
+ // move to attribute or bigger
+ while(iter->second.first!=iter->second.second
+ && iter->second.first->localIndexPair().global()==index
+ && iter->second.first->localIndexPair().local().attribute()<attribute)
+ ++(iter->second.first);
+
+ // erase from the map if there are no more entries.
+ if(iter->second.first == iter->second.second)
+ map_.erase(iter++);
+ else{
+ ++iter;
+ }
+ }
+ index_=index;
+ attribute_=attribute;
+ noattribute=false;
+ }
+
+ template<typename T, typename A>
+ inline CollectiveIterator<T,A>& CollectiveIterator<T,A>::operator++()
+ {
+ const auto end = map_.end();
+
+ for(auto iter = map_.begin(); iter != end;) {
+ // Step the iterator until we are >= index
+ auto current = iter->second.first;
+ auto rend = iter->second.second;
+
+ // move all iterators pointing to the current global index to next value
+ if(iter->second.first->localIndexPair().global()==index_ &&
+ (noattribute || iter->second.first->localIndexPair().local().attribute() == attribute_))
+ ++(iter->second.first);
+
+ // erase from the map if there are no more entries.
+ if(iter->second.first == iter->second.second)
+ map_.erase(iter++);
+ else{
+ ++iter;
+ }
+ }
+ return *this;
+ }
+
+ template<typename T, typename A>
+ inline bool CollectiveIterator<T,A>::empty() const
+ {
+ return map_.empty();
+ }
+
+ template<typename T, typename A>
+ inline typename CollectiveIterator<T,A>::iterator
+ CollectiveIterator<T,A>::begin()
+ {
+ if(noattribute)
+ return iterator(map_.begin(), map_.end(), index_);
+ else
+ return iterator(map_.begin(), map_.end(), index_,
+ attribute_);
+ }
+
+ template<typename T, typename A>
+ inline typename CollectiveIterator<T,A>::iterator
+ CollectiveIterator<T,A>::end()
+ {
+ return iterator(map_.end(), map_.end(), index_);
+ }
+
+ template<typename TG, typename TA>
+ inline std::ostream& operator<<(std::ostream& os, const RemoteIndex<TG,TA>& index)
+ {
+ os<<"[global="<<index.localIndexPair().global()<<", remote attribute="<<index.attribute()<<" local attribute="<<index.localIndexPair().local().attribute()<<"]";
+ return os;
+ }
+
+ template<typename T, typename A>
+ inline std::ostream& operator<<(std::ostream& os, const RemoteIndices<T,A>& indices)
+ {
+ int rank;
+ MPI_Comm_rank(indices.comm_, &rank);
+ const auto rend = indices.remoteIndices_.end();
+
+ for(auto rindex = indices.remoteIndices_.begin(); rindex!=rend; ++rindex) {
+ os<<rank<<": Prozess "<<rindex->first<<":";
+
+ if(!rindex->second.first->empty()) {
+ os<<" send:";
+
+ const auto send= rindex->second.first->end();
+
+ for(auto index = rindex->second.first->begin();
+ index != send; ++index)
+ os<<*index<<" ";
+ os<<std::endl;
+ }
+ if(!rindex->second.second->empty()) {
+ os<<rank<<": Prozess "<<rindex->first<<": "<<"receive: ";
+
+ for(const auto& index : *(rindex->second.second))
+ os << index << " ";
+ }
+ os<<std::endl<<std::flush;
+ }
+ return os;
+ }
+ /** @} */
+}
+
+#endif // HAVE_MPI
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_SELECTION_HH
+#define DUNE_SELECTION_HH
+
+#include "indexset.hh"
+#include <dune/common/iteratorfacades.hh>
+
+namespace Dune
+{
+ /** @addtogroup Common_Parallel
+ *
+ * @{
+ */
+ /**
+ * @file
+ * @brief Provides classes for selecting
+ * indices based on attribute flags.
+ * @author Markus Blatt
+ */
+
+ /**
+ * @brief A const iterator over an uncached selection.
+ */
+ template<typename TS, typename TG, typename TL, int N>
+ class SelectionIterator
+ {
+ public:
+ /**
+ * @brief The type of the Set of attributes.
+ *
+ * It has to provide a static method
+ * \code bool contains(AttributeType a); \endcode
+ * that returns true if a is in the set.
+ * Such types are EnumItem, EnumRange, Combine.
+ */
+ typedef TS AttributeSet;
+
+ /**
+ * @brief The type of the underlying index set.
+ */
+ typedef Dune::ParallelIndexSet<TG,TL,N> ParallelIndexSet;
+
+ //typedef typename ParallelIndexSet::const_iterator ParallelIndexSetIterator;
+
+ typedef ConstArrayListIterator<IndexPair<TG,TL>, N, std::allocator<Dune::IndexPair<TG,TL> > > ParallelIndexSetIterator;
+ /**
+ * @brief Constructor.
+ * @param iter The iterator over the index set.
+ * @param end The iterator over the index set positioned at the end.
+ */
+ SelectionIterator(const ParallelIndexSetIterator& iter, const ParallelIndexSetIterator& end)
+ : iter_(iter), end_(end)
+ {
+ // Step to the first valid entry
+ while(iter_!=end_ && !AttributeSet::contains(iter_->local().attribute()))
+ ++iter_;
+ }
+
+ void operator++()
+ {
+ assert(iter_!=end_);
+ for(++iter_; iter_!=end_; ++iter_)
+ if(AttributeSet::contains(iter_->local().attribute()))
+ break;
+ }
+
+
+ uint32_t operator*() const
+ {
+ return iter_->local().local();
+ }
+
+ bool operator==(const SelectionIterator<TS,TG,TL,N>& other) const
+ {
+ return iter_ == other.iter_;
+ }
+
+ bool operator!=(const SelectionIterator<TS,TG,TL,N>& other) const
+ {
+ return iter_ != other.iter_;
+ }
+
+ private:
+ ParallelIndexSetIterator iter_;
+ const ParallelIndexSetIterator end_;
+ };
+
+
+ /**
+ * @brief An uncached selection of indices.
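+ *
+ * A minimal usage sketch; the attribute set `OwnerSet` (built from a
+ * hypothetical attribute value `owner`) and the index set `indexSet` are
+ * assumptions made purely for illustration:
+ * \code
+ * typedef Dune::EnumItem<Attribute,owner> OwnerSet;
+ * Dune::UncachedSelection<OwnerSet,GlobalIndex,LocalIndex,N> selection;
+ * selection.setIndexSet(indexSet);
+ * // the iterators yield the local index number of every matching entry
+ * for(auto it = selection.begin(); it != selection.end(); ++it)
+ *   std::cout << *it << std::endl;
+ * \endcode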
+ */
+ template<typename TS, typename TG, typename TL, int N>
+ class UncachedSelection
+ {
+ public:
+ /**
+ * @brief The type of the Set of attributes.
+ *
+ * It has to provide a static method
+ * \code bool contains(AttributeType a); \endcode
+ * that returns true if a is in the set.
+ * Such types are EnumItem, EnumRange, Combine.
+ */
+ typedef TS AttributeSet;
+
+ /**
+ * @brief The type of the global index of the underlying index set.
+ */
+ typedef TG GlobalIndex;
+
+ /**
+ * @brief The type of the local index of the underlying index set.
+ *
+ * It has to provide a function
+ * \code AttributeType attribute(); \endcode
+ */
+ typedef TL LocalIndex;
+
+ /**
+ * @brief The type of the underlying index set.
+ */
+ typedef Dune::ParallelIndexSet<GlobalIndex,LocalIndex,N> ParallelIndexSet;
+
+ /**
+ * @brief The type of the iterator of the selected indices.
+ */
+ typedef SelectionIterator<TS,TG,TL,N> iterator;
+
+ /**
+ * @brief The type of the iterator of the selected indices.
+ */
+ typedef iterator const_iterator;
+
+ UncachedSelection()
+ : indexSet_()
+ {}
+
+ UncachedSelection(const ParallelIndexSet& indexset)
+ : indexSet_(&indexset)
+ {}
+ /**
+ * @brief Set the index set of the selection.
+ * @param indexset The index set to use.
+ */
+ void setIndexSet(const ParallelIndexSet& indexset);
+
+ /**
+ * @brief Get the index set we are a selection for.
+ */
+ //const ParallelIndexSet& indexSet() const;
+
+ /**
+ * @brief Get an iterator over the selected indices.
+ * @return An iterator positioned at the first selected index.
+ */
+ const_iterator begin() const;
+
+ /**
+ * @brief Get an iterator over the selected indices.
+ * @return An iterator positioned at the first selected index.
+ */
+ const_iterator end() const;
+
+
+ private:
+ const ParallelIndexSet* indexSet_;
+
+ };
+
+ /**
+ * @brief A cached selection of indices.
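+ *
+ * In contrast to UncachedSelection the matching local indices are copied
+ * into a private array once in setIndexSet, so iteration is a plain sweep
+ * over uint32_t values. A minimal sketch, reusing the hypothetical OwnerSet
+ * and indexSet from above:
+ * \code
+ * Dune::Selection<OwnerSet,GlobalIndex,LocalIndex,N> selection(indexSet);
+ * for(const uint32_t* it = selection.begin(); it != selection.end(); ++it)
+ *   std::cout << *it << std::endl;
+ * \endcode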
+ */
+ template<typename TS, typename TG, typename TL, int N>
+ class Selection
+ {
+ public:
+ /**
+ * @brief The type of the set of attributes.
+ *
+ * It has to provide a static method
+ * \code bool contains(AttributeType a); \endcode
+ * that returns true if a is in the set.
+ * Such types are EnumItem, EnumRange, Combine.
+ */
+ typedef TS AttributeSet;
+
+ /**
+ * @brief The type of the global index of the underlying index set.
+ */
+ typedef TG GlobalIndex;
+
+ /**
+ * @brief The type of the local index of the underlying index set.
+ *
+ * It has to provide a function
+ * \code AttributeType attribute(); \endcode
+ */
+ typedef TL LocalIndex;
+
+ /**
+ * @brief The type of the underlying index set.
+ */
+ typedef Dune::ParallelIndexSet<GlobalIndex,LocalIndex,N> ParallelIndexSet;
+
+ /**
+ * @brief The type of the iterator of the selected indices.
+ */
+ typedef uint32_t* iterator;
+
+ /**
+ * @brief The type of the iterator of the selected indices.
+ */
+ typedef uint32_t* const_iterator;
+
+ Selection()
+ : selected_(), size_(0), built_(false)
+ {}
+
+ Selection(const ParallelIndexSet& indexset)
+ : selected_(), size_(0), built_(false)
+ {
+ setIndexSet(indexset);
+ }
+
+ ~Selection();
+
+ /**
+ * @brief Set the index set of the selection.
+ * @param indexset The index set to use.
+ */
+ void setIndexSet(const ParallelIndexSet& indexset);
+
+ /**
+ * @brief Free allocated memory.
+ */
+ void free();
+
+ /**
+ * @brief Get the index set we are a selection for.
+ */
+ //IndexSet indexSet() const;
+
+ /**
+ * @brief Get an iterator over the selected indices.
+ * @return An iterator positioned at the first selected index.
+ */
+ const_iterator begin() const;
+
+ /**
+ * @brief Get an iterator over the selected indices.
+ * @return An iterator positioned at the first selected index.
+ */
+ const_iterator end() const;
+
+
+ private:
+ uint32_t* selected_;
+ size_t size_;
+ bool built_;
+
+ };
+
+ template<typename TS, typename TG, typename TL, int N>
+ inline void Selection<TS,TG,TL,N>::setIndexSet(const ParallelIndexSet& indexset)
+ {
+ if(built_)
+ free();
+
+ // Count the number of entries the selection has to hold
+ typedef typename ParallelIndexSet::const_iterator const_iterator;
+ const const_iterator end = indexset.end();
+ int entries = 0;
+
+ for(const_iterator index = indexset.begin(); index != end; ++index)
+ if(AttributeSet::contains(index->local().attribute()))
+ ++entries;
+
+ selected_ = new uint32_t[entries];
+ built_ = true;
+
+ entries = 0;
+ for(const_iterator index = indexset.begin(); index != end; ++index)
+ if(AttributeSet::contains(index->local().attribute()))
+ selected_[entries++]= index->local().local();
+
+ size_=entries;
+ built_=true;
+ }
+
+ template<typename TS, typename TG, typename TL, int N>
+ uint32_t* Selection<TS,TG,TL,N>::begin() const
+ {
+ return selected_;
+ }
+
+ template<typename TS, typename TG, typename TL, int N>
+ uint32_t* Selection<TS,TG,TL,N>::end() const
+ {
+ return selected_+size_;
+ }
+
+ template<typename TS, typename TG, typename TL, int N>
+ inline void Selection<TS,TG,TL,N>::free()
+ {
+ delete[] selected_;
+ size_=0;
+ built_=false;
+ }
+
+ template<typename TS, typename TG, typename TL, int N>
+ inline Selection<TS,TG,TL,N>::~Selection()
+ {
+ if(built_)
+ free();
+ }
+
+ template<typename TS, typename TG, typename TL, int N>
+ SelectionIterator<TS,TG,TL,N> UncachedSelection<TS,TG,TL,N>::begin() const
+ {
+ return SelectionIterator<TS,TG,TL,N>(indexSet_->begin(),
+ indexSet_->end());
+ }
+
+ template<typename TS, typename TG, typename TL, int N>
+ SelectionIterator<TS,TG,TL,N> UncachedSelection<TS,TG,TL,N>::end() const
+ {
+ return SelectionIterator<TS,TG,TL,N>(indexSet_->end(),
+ indexSet_->end());
+ }
+ template<typename TS, typename TG, typename TL, int N>
+ void UncachedSelection<TS,TG,TL,N>::setIndexSet(const ParallelIndexSet& indexset)
+ {
+ indexSet_ = &indexset;
+ }
+
+ /** @} */
+
+
+}
+#endif
--- /dev/null
+dune_add_test(SOURCES communicationtest.cc
+ LINK_LIBRARIES dunecommon
+ MPI_RANKS 1 2 4
+ TIMEOUT 300
+ CMAKE_GUARD MPI_FOUND
+ LABELS quick)
+
+dune_add_test(SOURCES indexsettest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES remoteindicestest.cc
+ LINK_LIBRARIES dunecommon
+ MPI_RANKS 1 2 4
+ TIMEOUT 300
+ CMAKE_GUARD MPI_FOUND
+ LABELS quick)
+
+dune_add_test(SOURCES selectiontest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES syncertest.cc
+ LINK_LIBRARIES dunecommon
+ MPI_RANKS 1 2 4
+ TIMEOUT 300
+ CMAKE_GUARD MPI_FOUND
+ LABELS quick)
+
+dune_add_test(SOURCES variablesizecommunicatortest.cc
+ MPI_RANKS 1 2 4
+ TIMEOUT 300
+ CMAKE_GUARD MPI_FOUND
+ LABELS quick)
+
+dune_add_test(SOURCES mpidatatest.cc
+ LINK_LIBRARIES dunecommon
+ MPI_RANKS 2
+ TIMEOUT 300
+ LABELS quick)
+
+dune_add_test(SOURCES mpifuturetest.cc
+ LINK_LIBRARIES dunecommon
+ MPI_RANKS 1 2 4
+ TIMEOUT 300
+ LABELS quick)
+
+dune_add_test(SOURCES mpipacktest.cc
+ LINK_LIBRARIES dunecommon
+ MPI_RANKS 2
+ TIMEOUT 300
+ CMAKE_GUARD MPI_FOUND
+ LABELS quick)
+
+dune_add_test(SOURCES mpigatherscattertest.cc
+ LINK_LIBRARIES dunecommon
+ MPI_RANKS 2
+ TIMEOUT 300
+ CMAKE_GUARD MPI_FOUND
+ LABELS quick)
--- /dev/null
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <dune/common/parallel/mpihelper.hh>
+int main(int argc, char** argv)
+{
+ Dune::MPIHelper::instance(argc, argv);
+
+ int ret = 0;
+ Dune::No_Comm nc1, nc2;
+
+ if ( !(nc1 == nc2))
+ {
+ std::cerr << "operator==: No_Comms need to compare equal"<<std::endl;
+ ++ret;
+ }
+ if ( nc1 != nc2)
+ {
+ std::cerr << "operator!=: No_Comms need to compare equal"<<std::endl;
+ ++ret;
+ }
+
+ if (Dune::FakeMPIHelper::getCommunicator() != Dune::FakeMPIHelper::getLocalCommunicator())
+ {
+ std::cerr << "FakeMPIHelper::getCommunicator() and FakeMPIHelper::getLocalCommunicator()) should be equal"<<std::endl;
+ ++ret;
+ }
+
+ nc1 = Dune::FakeMPIHelper::getCommunication();
+
+ [[maybe_unused]] Dune::MPIHelper::MPICommunicator comm = Dune::MPIHelper::getCommunication();
+
+#if HAVE_MPI
+ if (MPI_COMM_SELF != Dune::MPIHelper::getLocalCommunicator())
+ {
+ std::cerr<<"Dune::MPIHelper::getLocalCommunicator() gives wrong result"<<std::endl;
+ ++ret;
+ }
+ if (MPI_COMM_WORLD != Dune::MPIHelper::getCommunicator())
+ {
+ std::cerr<<"Dune::MPIHelper::getCommunicator() gives wrong result"<<std::endl;
+ ++ret;
+ }
+#endif
+ return ret;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <cstdlib>
+#include <iostream>
+#include <ostream>
+
+#include <dune/common/parallel/indexset.hh>
+#include <dune/common/parallel/localindex.hh>
+
+int testDeleteIndices()
+{
+ Dune::ParallelIndexSet<int,Dune::LocalIndex,15> indexSet;
+ Dune::ParallelIndexSet<int,Dune::LocalIndex,25> indexSet1;
+
+ indexSet.beginResize();
+ indexSet1.beginResize();
+
+ for(int i=0; i< 10; i++) {
+ indexSet.add(i, Dune::LocalIndex (i));
+ indexSet1.add(i, Dune::LocalIndex (i));
+ }
+
+ indexSet.endResize();
+ indexSet1.endResize();
+
+ typedef Dune::ParallelIndexSet<int,Dune::LocalIndex,15>::iterator
+ Iterator;
+
+ Iterator entry = indexSet.begin();
+ indexSet.beginResize();
+
+ for(int i=0; i < 5; i++)
+ ++entry;
+
+ indexSet.markAsDeleted(entry);
+
+ indexSet.endResize();
+
+ std::cout<<"Unchanged: "<<indexSet1<<std::endl;
+ std::cout<<"Deleted: "<<indexSet<<std::endl;
+
+ Iterator end = indexSet.end();
+ int i=0, ret=0;
+
+ for(entry = indexSet.begin(); entry != end; ++entry,++i)
+ if(entry->global()==5) {
+ std::cerr<<"Entry was not deleted!"<<std::endl;
+ ret++;
+ }
+
+ if(i>9) {
+ std::cerr<<"Number of entries not correct!"<<std::endl;
+ ret++;
+ }
+
+ Dune::ParallelIndexSet<int,Dune::LocalIndex,25>::iterator iter=indexSet1.begin();
+
+ // Test whether the local indices changed
+ for(entry = indexSet.begin(); entry != end; ++entry) {
+ while(iter->global() < entry->global())
+ iter++;
+ if(iter->global() != entry->global()) {
+ std::cerr <<" Global indices do not match!"<<std::endl;
+ ++ret;
+ }
+ if(iter->local() != entry->local()) {
+ std::cerr <<" Local indices do not match!"<<std::endl;
+ ++ret;
+ }
+ }
+
+ return ret;
+}
+
+int main(int, char **)
+{
+ std::exit(testDeleteIndices());
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#include <config.h>
+
+#include <iostream>
+
+#include <dune/common/dynmatrix.hh>
+#include <dune/common/parallel/mpidata.hh>
+
+#include <dune/common/parallel/mpihelper.hh>
+using namespace Dune;
+
+int main(int argc, char** argv){
+ Dune::MPIHelper & mpihelper = Dune::MPIHelper::instance(argc, argv);
+ auto cc = mpihelper.getCommunication();
+
+ if(mpihelper.rank() == 0)
+ std::cout << "Test 1: static data (int)" << std::endl;
+ if(mpihelper.rank() == 0){
+ cc.send<int>(42, 1, 0);
+ int i = 42;
+ const int& j = i;
+ cc.send(j, 1, 0);
+ }
+ else if(mpihelper.rank() == 1){
+ std::cout << "receive: " << cc.recv<int>(0, 0, 0) << std::endl;
+ int i = 0;
+ cc.recv(i, 0, 0);
+ std::cout << i << std::endl;
+ }
+
+ if(mpihelper.rank() == 0)
+ std::cout << "Test 2: dynamic data (std::vector<double>)" << std::endl;
+ if(mpihelper.rank() == 0){
+ cc.send(std::vector<double>{ 42.0, 43.0, 4711}, 1, 0);
+ std::vector<double> vec{ 42.0, 43.0, 4711};
+ const std::vector<double>& vec_ref = vec;
+ cc.send(vec_ref, 1, 0);
+ cc.send(std::move(vec), 1, 0);
+ }
+ else if(mpihelper.rank() == 1){
+ auto vec = cc.recv(std::vector<double>{0,0,0}, 0, 0);
+ std::cout << "receive: ";
+ for(double d : vec)
+ std::cout << d << ",";
+ std::cout << "\b" << std::endl;
+ std::vector<double> vec2(3);
+ cc.recv(vec2, 0, 0);
+ for(double d : vec2)
+ std::cout << d << ",";
+ std::cout << "\b" << std::endl;
+
+ std::vector<double> vec3(3);
+ auto d = vec3.data();
+ std::vector<double> vec4 = cc.recv(std::move(vec3), 0, 0);
+ for(double d : vec4)
+ std::cout << d << ",";
+ std::cout << "\b" << std::endl;
+ if(d != vec4.data())
+ DUNE_THROW(Exception, "The vector has not the same memory");
+ }
+
+ if(mpihelper.rank() == 0)
+ std::cout << "Test 3: DynamicVector" << std::endl;
+ if(mpihelper.rank() == 0){
+ cc.send(DynamicVector<double>{ 42.0, 43.0, 4711}, 1, 0);
+ DynamicVector<double> vec{ 42.0, 43.0, 4711};
+ const DynamicVector<double>& vec_ref = vec;
+ cc.send(vec_ref, 1, 0);
+ cc.send(std::move(vec), 1, 0);
+ }
+ else if(mpihelper.rank() == 1){
+ auto vec = cc.recv(DynamicVector<double>{0,0,0}, 0, 0);
+ std::cout << "receive: ";
+ for(double d : vec)
+ std::cout << d << ",";
+ std::cout << "\b" << std::endl;
+ DynamicVector<double> vec2(3);
+ cc.recv(vec2, 0, 0);
+ for(double d : vec2)
+ std::cout << d << ",";
+ std::cout << "\b" << std::endl;
+
+ DynamicVector<double> vec3(3);
+ auto d = vec3.container().data();
+ DynamicVector<double> vec4 = cc.recv(std::move(vec3), 0, 0);
+ for(double d : vec4)
+ std::cout << d << ",";
+ std::cout << "\b" << std::endl;
+ if(d != vec4.container().data())
+ DUNE_THROW(Exception, "The vector has not the same memory");
+ }
+
+
+ if(mpihelper.rank() == 0)
+ std::cout << "Test 3: DynamicVector (resize receive)" << std::endl;
+ if(mpihelper.rank() == 0){
+ cc.send(DynamicVector<double>{ 42.0, 43.0, 4711}, 1, 0);
+ DynamicVector<double> vec{ 42.0, 43.0, 4711};
+ const DynamicVector<double>& vec_ref = vec;
+ cc.send(vec_ref, 1, 0);
+ cc.send(std::move(vec), 1, 0);
+ }
+ else if(mpihelper.rank() == 1){
+ auto vec = cc.rrecv(DynamicVector<double>{}, 0, 0);
+ std::cout << "receive: ";
+ for(double d : vec)
+ std::cout << d << ",";
+ std::cout << "\b" << std::endl;
+ DynamicVector<double> vec2(3);
+ cc.recv(vec2, 0, 0);
+ for(double d : vec2)
+ std::cout << d << ",";
+ std::cout << "\b" << std::endl;
+
+ DynamicVector<double> vec3(3);
+ auto d = vec3.container().data();
+ DynamicVector<double> vec4 = cc.recv(std::move(vec3), 0, 0);
+ for(double d : vec4)
+ std::cout << d << ",";
+ std::cout << "\b" << std::endl;
+ if(d != vec4.container().data())
+ DUNE_THROW(Exception, "The vector has not the same memory");
+ }
+
+ return 0;
+}
--- /dev/null
+#include <config.h>
+
+#include <iostream>
+#include <numeric>
+#include <dune/common/parallel/mpihelper.hh>
+#include <dune/common/dynvector.hh>
+
+namespace Dune {
+ template<class Dummy>
+ struct MPIData<const int&, Dummy>
+ {
+ static_assert(Dune::AlwaysFalse<Dummy>::value, "MPIData of reference type should not be used!");
+ // MPIData of reference type should not be used!
+ // This struct should never be used; it just
+ // exists to generate a compiler error.
+ };
+}
+
+
+int main(int argc, char** argv){
+ auto& mpihelper = Dune::MPIHelper::instance(argc, argv);
+
+ auto cc = mpihelper.getCommunication();
+
+ // p2p
+ if(mpihelper.size() > 1){
+ if(mpihelper.rank() == 0){
+ Dune::Future<int> f = cc.isend(42, 1, 0);
+ f.wait();
+ int i = 42;
+ Dune::Future<const int&> f2 = cc.isend<const int&>(i, 1, 0);
+ f2.wait();
+ }else if(mpihelper.rank() == 1){
+ Dune::Future<int> f = cc.irecv(41, 0, 0);
+ std::cout << "Rank 1 received " << f.get() << std::endl;
+ int j = 41;
+ Dune::Future<int&> f2 = cc.irecv<int&>(j, 0, 0);
+ std::cout << "Rank 1 received " << f2.get() << std::endl;
+ }
+ }
+
+ int answer;
+ if(mpihelper.rank() == 0){
+ std::cout << "Broadcast lvalue-reference" << std::endl;
+ answer = 42;
+ }
+ Dune::Future<int> f = cc.template ibroadcast(answer, 0);
+ f.wait();
+ std::cout << "Rank " << mpihelper.rank() << " knows: The answer is " << answer << std::endl;
+ if(mpihelper.rank() == 0)
+ std::cout << "Broadcast value" << std::endl;
+ Dune::Future<int> f2 = cc.template ibroadcast(int(answer), 0);
+ std::cout << "Rank " << mpihelper.rank() << " knows: The answer is " << f2.get() << std::endl;
+
+ Dune::DynamicVector<double> vec(3);
+ if(mpihelper.rank() == 0){
+ std::cout << "Broadcast vector" << std::endl;
+ std::iota(vec.begin(), vec.end(), 41);
+ }
+ Dune::Future<Dune::DynamicVector<double>> f3 = cc.ibroadcast(vec, 0);
+ f3.wait();
+ std::cout << "Rank " << mpihelper.rank() << " received vector: " << vec << std::endl;
+
+ if(mpihelper.rank() == 0)
+ std::cout << "nonb Barrier ==========================" << std::endl;
+ Dune::Future<void> f4 = cc.ibarrier();
+ f4.wait();
+
+ if(mpihelper.rank() == 0){
+ std::cout << "nonb gather ===========================" << std::endl;
+ Dune::Future<Dune::DynamicVector<int>> f = cc.igather(mpihelper.rank() + 42, Dune::DynamicVector<int>(mpihelper.size()), 0);
+ std::cout << "Gather result: " << f.get() << std::endl;
+ }else{
+ cc.igather(mpihelper.rank(), {}, 0).wait();
+ }
+
+ if(mpihelper.rank() == 0){
+ std::cout << "nonb scatter ===========================" << std::endl;
+ std::vector<int> my_buddies(mpihelper.size());
+ std::iota(my_buddies.begin(), my_buddies.end(), 42);
+ Dune::Future<int> f = cc.iscatter(my_buddies, 0, 0);
+ std::cout << "Scatter result (Rank " << mpihelper.rank() << "): " << f.get() << std::endl;
+ }else{
+ Dune::Future<int> f = cc.iscatter(std::vector<int>(0), 0, 0);
+ std::cout << "Scatter result (Rank " << mpihelper.rank() << "): " << f.get() << std::endl;
+ }
+
+ {
+ if(mpihelper.rank() == 0)
+ std::cout << "nonb allreduce ===========================" << std::endl;
+ Dune::Future<int> f = cc.iallreduce<std::plus<int>>(mpihelper.rank()+4, 0);
+ std::cout << "Allreduce result on rank " << mpihelper.rank() <<": " << f.get() << std::endl;
+ }
+
+ {
+ if(mpihelper.rank() == 0)
+ std::cout << "nonb allreduce inplace ===========================" << std::endl;
+ Dune::Future<Dune::DynamicVector<int>> f = cc.iallreduce<std::plus<int>>(Dune::DynamicVector<int>{42, 3+mpihelper.rank()});
+ std::cout << "Allreduce result on rank " << mpihelper.rank() <<": " << f.get() << std::endl;
+ }
+
+ {
+ if(mpihelper.rank() == 0)
+ std::cout << "check for MPI_SUM with double& ===========================" << std::endl;
+ double answer = 42;
+ auto f = cc.iallreduce<std::plus<double>>(answer);
+ std::cout << "Allreduce result on rank " << mpihelper.rank() <<": " << f.get() << std::endl;
+ }
+
+  // that's wrong, MPIFuture would hold a dangling reference:
+ // Dune::MPIFuture<int&> g;
+ // {
+ // int i = 42;
+ // g = cc.iallreduce<std::plus<int>>(i);
+ // }
+ // g.wait();
+ return 0;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#include <config.h>
+
+#include <iostream>
+#include <array>
+#include <vector>
+
+
+#include <dune/common/parallel/mpihelper.hh>
+#include <dune/common/parallel/mpicommunication.hh>
+#include <dune/common/exceptions.hh>
+
+using namespace Dune;
+
+int main(int argc, char** argv){
+ MPIHelper & mpihelper = MPIHelper::instance(argc, argv);
+ auto cc = mpihelper.getCommunication();
+ int rank = cc.rank();
+ int size = cc.size();
+
+ std::array<double, 2> data = {1.0 + rank, 2.0 + rank};
+
+ auto gathered = cc.igather(data, std::vector<double>(rank==0?2*size:0), 0).get();
+
+ for(auto& d : gathered)
+ d += 1;
+
+ cc.iscatter(gathered, data, 0).get();
+ if(data[0] != 2+rank ||
+ data[1] != 3+rank){
+ DUNE_THROW(Exception, "Wrong result after gather - scatter");
+ }
+
+ return 0;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <iostream>
+
+#include <dune/common/parallel/mpihelper.hh>
+#include <dune/common/parallel/mpipack.hh>
+#include <dune/common/test/testsuite.hh>
+
+constexpr int TAG = 42;
+int main(int argc, char** argv){
+ Dune::MPIHelper& helper = Dune::MPIHelper::instance(argc, argv);
+ Dune::TestSuite suite;
+ suite.require(helper.size() == 2) << "This test must be executed on two processes";
+ auto comm = helper.getCommunication();
+ Dune::MPIPack pack(comm);
+
+ if(helper.rank() == 0){
+ pack << 3 << helper.rank();
+ pack << std::vector<int>{4711, 42};
+ comm.send(pack, 1, TAG);
+ }
+ if(helper.rank() == 1){
+ Dune::MPIPack pack = comm.rrecv(Dune::MPIPack(comm), 0, TAG);
+ int drei; pack >> drei;
+ int rank_0; pack >> rank_0;
+ std::vector<int> vec;
+ pack >> vec;
+ suite.check(drei==3) << "received wrong value";
+ suite.check(rank_0==0) << "received wrong value";
+ suite.check(vec.size() == 2) << "vector has wrong size!";
+ suite.check(vec[0] == 4711 && vec[1] == 42) << "vector contains wrong values!";
+ }
+
+ return 0;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#include <config.h>
+
+#include <algorithm>
+#include <iostream>
+#include <ostream>
+#include <string>
+#include <vector>
+
+#include <mpi.h>
+
+#include <dune/common/enumset.hh>
+#include <dune/common/parallel/communicator.hh>
+#include <dune/common/parallel/indexset.hh>
+#include <dune/common/parallel/interface.hh>
+#include <dune/common/parallel/plocalindex.hh>
+#include <dune/common/parallel/remoteindices.hh>
+
+enum GridFlags {
+ owner, overlap, border
+};
+
+class Array;
+
+std::ostream& operator<<(std::ostream& os, const Array& a);
+
+class Array
+{
+ friend std::ostream& operator<<(std::ostream& os, const Array& a);
+public:
+ typedef double value_type;
+ Array() : vals_(0), size_(0)
+ {}
+
+ Array(int size) : size_(size)
+ {
+ vals_ = new double[size];
+ }
+
+ void build(int size)
+ {
+ vals_ = new double[size];
+ size_ = size;
+ }
+
+ Array& operator+=(double d)
+ {
+ for(int i=0; i < size_; i++)
+ vals_[i]+=d;
+ return *this;
+ }
+
+ ~Array()
+ {
+ if(vals_!=0)
+ delete[] vals_;
+ }
+
+ const double& operator[](int i) const
+ {
+ return vals_[i];
+ }
+
+ double& operator[](int i)
+ {
+ return vals_[i];
+ }
+private:
+  // copying is not supported
+  Array(const Array&) = delete;
+ double *vals_;
+ int size_;
+};
+
+struct ArrayGatherScatter
+{
+ static double gather(const Array& a, int i);
+
+ static void scatter(Array& a, double v, int i);
+
+};
+
+
+inline double ArrayGatherScatter::gather(const Array& a, int i)
+{
+ return a[i];
+}
+
+inline void ArrayGatherScatter::scatter(Array& a, double v, int i)
+{
+ a[i]=v;
+
+}
+
+std::ostream& operator<<(std::ostream& os, const Array& a)
+{
+ if(a.size_>0)
+ os<< "{ "<<a.vals_[0];
+
+ for(int i=1; i<a.size_; i++)
+ os <<", "<< a.vals_[i];
+
+ os << " }";
+ return os;
+}
+
+void testIndices(MPI_Comm comm)
+{
+ //using namespace Dune;
+
+ // The global grid size
+ const int Nx = 20;
+ const int Ny = 2;
+
+ // Process configuration
+ int procs, rank, master=0;
+ MPI_Comm_size(comm, &procs);
+ MPI_Comm_rank(comm, &rank);
+
+ // shift the ranks
+ //rank = (rank + 1) % procs;
+ //master= (master+1) %procs;
+
+ // The local grid
+ int nx = Nx/procs;
+ // distributed indexset
+ // typedef ParallelLocalIndex<GridFlags> LocalIndexType;
+
+ typedef Dune::ParallelIndexSet<int,Dune::ParallelLocalIndex<GridFlags>,45> ParallelIndexSet;
+
+ ParallelIndexSet distIndexSet;
+ // global indexset
+ ParallelIndexSet globalIndexSet;
+
+ // Set up the indexsets.
+ int start = std::max(rank*nx-1,0);
+ int end = std::min((rank + 1) * nx+1, Nx);
+
+ distIndexSet.beginResize();
+
+ int localIndex=0;
+ int size = Ny*(end-start);
+ Array distArray(size);
+ Array* globalArray;
+ int index=0;
+
+ std::cout<<rank<<": Nx="<<Nx<<" Ny="<<Ny<<" size="<<size<<std::endl;
+
+ for(int j=0; j<Ny; j++)
+ for(int i=start; i<end; i++) {
+ bool isPublic = (i<=start+1)||(i>=end-2);
+ GridFlags flag = owner;
+ if((i==start && i!=0)||(i==end-1 && i!=Nx-1)) {
+ distArray[index++]=-(i+j*Nx+rank*Nx*Ny);
+ flag = overlap;
+ }else
+ distArray[index++]=i+j*Nx+rank*Nx*Ny;
+
+ distIndexSet.add(i+j*Nx, Dune::ParallelLocalIndex<GridFlags> (localIndex++,flag,isPublic));
+ }
+
+ distIndexSet.endResize();
+
+ if(rank==master) {
+ // build global indexset on first process
+ globalIndexSet.beginResize();
+ globalArray=new Array(Nx*Ny);
+ int k=0;
+ for(int j=0; j<Ny; j++)
+ for(int i=0; i<Nx; i++) {
+ globalIndexSet.add(i+j*Nx, Dune::ParallelLocalIndex<GridFlags> (i+j*Nx,owner,false));
+ globalArray->operator[](i+j*Nx)=-(i+j*Nx);
+ k++;
+
+ }
+
+ globalIndexSet.endResize();
+ }else
+ globalArray=new Array(0);
+
+ typedef Dune::RemoteIndices<ParallelIndexSet> RemoteIndices;
+
+ RemoteIndices accuIndices(distIndexSet, globalIndexSet, comm);
+ RemoteIndices overlapIndices(distIndexSet, distIndexSet, comm);
+ accuIndices.rebuild<true>();
+ overlapIndices.rebuild<false>();
+
+ Dune::DatatypeCommunicator<ParallelIndexSet> accumulator, overlapExchanger;
+
+ Dune::EnumItem<GridFlags,owner> sourceFlags;
+ Dune::Combine<Dune::EnumItem<GridFlags,overlap>,Dune::EnumItem<GridFlags,owner>,GridFlags> destFlags;
+
+ accumulator.build(accuIndices, sourceFlags, distArray, destFlags, *globalArray);
+
+ overlapExchanger.build(overlapIndices, Dune::EnumItem<GridFlags,owner>(), distArray, Dune::EnumItem<GridFlags,overlap>(), distArray);
+
+ std::cout<< rank<<": before forward distArray="<< distArray<<std::endl;
+
+ // Exchange the overlap
+ overlapExchanger.forward();
+
+ std::cout<<rank<<": overlap exchanged distArray"<< distArray<<std::endl;
+
+ if(rank==master)
+ std::cout<<": before forward globalArray="<< *globalArray<<std::endl;
+
+ accumulator.forward();
+
+
+ if(rank==master) {
+ std::cout<<"after forward global: "<<*globalArray<<std::endl;
+ *globalArray+=1;
+ std::cout<<" added one: globalArray="<<*globalArray<<std::endl;
+ }
+
+ accumulator.backward();
+ std::cout<< rank<<": after backward distArray"<< distArray<<std::endl;
+
+
+ // Exchange the overlap
+ overlapExchanger.forward();
+
+ std::cout<<rank<<": overlap exchanged distArray"<< distArray<<std::endl;
+
+ //std::cout << rank<<": source and dest are the same:"<<std::endl;
+ //std::cout << remote<<std::endl<<std::flush;
+ if(rank==master)
+ delete globalArray;
+}
+
+
+template<int NX, int NY, typename TG, typename TA>
+void setupDistributed(Array& distArray, Dune::ParallelIndexSet<TG,Dune::ParallelLocalIndex<TA> >& distIndexSet,
+ int rank, int procs)
+{
+ // The local grid
+ int nx = NX/procs;
+ int mod = NX%procs;
+
+ // Set up the indexsets.
+ int start, end;
+ int ostart, oend;
+
+ if(rank<mod) {
+ start = rank * (nx + 1);
+ end = start + (nx + 1);
+ }else{
+ start = mod + rank * nx;
+ end = start + nx;
+ }
+
+ if(rank>0)
+ ostart = start - 1;
+ else
+ ostart = start;
+
+ if(rank<procs-1)
+ oend = end+1;
+ else
+ oend = end;
+
+ distIndexSet.beginResize();
+
+ int localIndex=0;
+ int size = NY*(oend-ostart);
+
+ distArray.build(size);
+
+ for(int j=0; j<NY; j++)
+ for(int i=ostart; i<oend; i++) {
+ bool isPublic = (i<=start+1)||(i>=end-1);
+ GridFlags flag = owner;
+ if((i<start || i>=end)) {
+ distArray[localIndex]=-(i+j*NX+rank*NX*NY);
+ flag = overlap;
+ }else
+ distArray[localIndex]=i+j*NX+rank*NX*NY;
+
+ distIndexSet.add(i+j*NX, Dune::ParallelLocalIndex<GridFlags> (localIndex++,flag,isPublic));
+ }
+
+ distIndexSet.endResize();
+
+
+}
+
+template<int NX,int NY, typename TG, typename TA>
+void setupGlobal(Array& globalArray, Dune::ParallelIndexSet<TG,Dune::ParallelLocalIndex<TA> >& globalIndexSet)
+{
+ // build global indexset on first process
+ globalIndexSet.beginResize();
+ globalArray.build(NX*NY);
+ int k=0;
+ for(int j=0; j<NY; j++)
+ for(int i=0; i<NX; i++) {
+ globalIndexSet.add(i+j*NX, Dune::ParallelLocalIndex<GridFlags> (i+j*NX,owner,false));
+ globalArray[i+j*NX]=-(i+j*NX);
+ k++;
+
+ }
+
+ globalIndexSet.endResize();
+}
+
+void testIndicesBuffered(MPI_Comm comm)
+{
+ //using namespace Dune;
+
+ // The global grid size
+ const int Nx = 8;
+ const int Ny = 1;
+
+ // Process configuration
+ int procs, rank, master=0;
+ MPI_Comm_size(comm, &procs);
+ MPI_Comm_rank(comm, &rank);
+
+ typedef Dune::ParallelIndexSet<int,Dune::ParallelLocalIndex<GridFlags> > ParallelIndexSet;
+
+ ParallelIndexSet distIndexSet;
+ // global indexset
+ ParallelIndexSet globalIndexSet;
+
+ Array distArray;
+ Array globalArray;
+
+ setupDistributed<Nx,Ny>(distArray, distIndexSet, rank, procs);
+
+
+ if(rank==master) {
+ setupGlobal<Nx,Ny>(globalArray, globalIndexSet);
+ }
+
+ typedef Dune::RemoteIndices<ParallelIndexSet> RemoteIndices;
+
+ RemoteIndices accuIndices(distIndexSet, globalIndexSet, comm);
+
+ accuIndices.rebuild<true>();
+ std::cout<<"dist "<<rank<<": "<<distIndexSet<<std::endl;
+ std::cout<<"global "<<rank<<": "<<globalIndexSet<<std::endl;
+ std::cout << accuIndices<<std::endl;
+ std::cout <<" end remote indices"<<std::endl;
+
+ RemoteIndices overlapIndices(distIndexSet, distIndexSet, comm);
+ overlapIndices.rebuild<false>();
+
+ Dune::Interface accuInterface;
+ Dune::Interface overlapInterface;
+ Dune::EnumItem<GridFlags,owner> sourceFlags;
+ Dune::Combine<Dune::EnumItem<GridFlags,overlap>,Dune::EnumItem<GridFlags,owner>,GridFlags> destFlags;
+ // Dune::Bool2Type<true> flag;
+
+ accuInterface.build(accuIndices, sourceFlags, destFlags);
+ overlapInterface.build(overlapIndices, Dune::EnumItem<GridFlags,owner>(),
+ Dune::EnumItem<GridFlags,overlap>());
+ overlapInterface.print();
+ accuInterface.print();
+
+ //accuInterface.print();
+
+ Dune::BufferedCommunicator accumulator, overlapExchanger;
+
+ accumulator.build<Array>(accuInterface);
+
+ overlapExchanger.build<Array>(overlapInterface);
+
+ std::cout<< rank<<": before forward distArray="<< distArray<<std::endl;
+
+ // Exchange the overlap
+ overlapExchanger.forward<ArrayGatherScatter>(distArray, distArray);
+
+ std::cout<<rank<<": overlap exchanged distArray"<< distArray<<std::endl;
+
+ if(rank==master)
+ std::cout<<": before forward globalArray="<< globalArray<<std::endl;
+
+ accumulator.forward<ArrayGatherScatter>(distArray, globalArray);
+
+
+ if(rank==master) {
+ std::cout<<"after forward global: "<<globalArray<<std::endl;
+ globalArray+=1;
+ std::cout<<" added one: globalArray="<<globalArray<<std::endl;
+ }
+
+ accumulator.backward<ArrayGatherScatter>(distArray, globalArray);
+ std::cout<< rank<<": after backward distArray"<< distArray<<std::endl;
+
+
+ // Exchange the overlap
+ overlapExchanger.forward<ArrayGatherScatter>(distArray);
+
+ std::cout<<rank<<": overlap exchanged distArray"<< distArray<<std::endl;
+
+ //std::cout << rank<<": source and dest are the same:"<<std::endl;
+ //std::cout << remote<<std::endl<<std::flush;
+}
+
+
+void testRedistributeIndices(MPI_Comm comm)
+{
+ using namespace Dune;
+
+ // The global grid size
+ const int Nx = 20;
+ const int Ny = 2;
+
+ // Process configuration
+ int procs, rank;
+ MPI_Comm_size(comm, &procs);
+ MPI_Comm_rank(comm, &rank);
+
+ // The local grid
+ int nx = Nx/procs;
+ // distributed indexset
+
+ typedef ParallelIndexSet<int,ParallelLocalIndex<GridFlags> > ParallelIndexSet;
+ ParallelIndexSet sendIndexSet;
+ // global indexset
+ ParallelIndexSet receiveIndexSet;
+
+ Array array, redistributedArray;
+
+ // Set up the indexsets.
+ {
+
+ int start = std::max(rank*nx-1,0);
+ int end = std::min((rank + 1) * nx+1, Nx);
+
+ sendIndexSet.beginResize();
+
+
+ array.build(Ny*(end-start));
+
+ for(int j=0, localIndex=0; j<Ny; j++)
+ for(int i=start; i<end; i++, localIndex++) {
+ bool isPublic = (i<=start+1)||(i>=end-2);
+ GridFlags flag = owner;
+
+ if((i==start && i!=0)||(i==end-1 && i!=Nx-1))
+ flag = overlap;
+
+ sendIndexSet.add(i+j*Nx, ParallelLocalIndex<GridFlags> (localIndex,flag,isPublic));
+ array[localIndex]=i+j*Nx+rank*Nx*Ny;
+ }
+
+ sendIndexSet.endResize();
+ }
+ {
+ int newrank = (rank + 1) % procs;
+
+ int start = std::max(newrank*nx-1,0);
+ int end = std::min((newrank + 1) * nx+1, Nx);
+
+ std::cout<<rank<<": "<<newrank<<" start="<<start<<" end"<<end<<std::endl;
+
+ redistributedArray.build(Ny*(end-start));
+
+ receiveIndexSet.beginResize();
+
+ for(int j=0, localIndex=0; j<Ny; j++)
+ for(int i=start; i<end; i++, localIndex++) {
+ bool isPublic = (i<=start+1)||(i>=end-2);
+ GridFlags flag = owner;
+
+ if((i==start && i!=0)||(i==end-1 && i!=Nx-1))
+ flag = overlap;
+
+ receiveIndexSet.add(i+j*Nx, ParallelLocalIndex<GridFlags> (localIndex,flag,isPublic));
+ redistributedArray[localIndex]=-1;
+ }
+
+ receiveIndexSet.endResize();
+ }
+
+
+ std::cout<< rank<<": distributed and global index set!"<<std::endl<<std::flush;
+ typedef RemoteIndices<ParallelIndexSet> RemoteIndices;
+
+ RemoteIndices redistributeIndices(sendIndexSet,
+ receiveIndexSet, comm);
+ RemoteIndices overlapIndices(receiveIndexSet, receiveIndexSet, comm);
+
+ redistributeIndices.rebuild<true>();
+ overlapIndices.rebuild<false>();
+
+ DatatypeCommunicator<ParallelIndexSet> redistribute, overlapComm;
+ EnumItem<GridFlags,owner> fowner;
+ EnumItem<GridFlags,overlap> foverlap;
+
+ redistribute.build(redistributeIndices, fowner, array, fowner, redistributedArray);
+
+ overlapComm.build(overlapIndices, fowner, redistributedArray, foverlap, redistributedArray);
+ std::cout<<rank<<": initial array: "<<array<<std::endl;
+
+ redistribute.forward();
+
+ std::cout<<rank<<": redistributed array: "<<redistributedArray<<std::endl;
+
+ overlapComm.forward();
+
+ std::cout<<rank<<": redistributed array with overlap communicated: "<<redistributedArray<<std::endl;
+}
+
+void testRedistributeIndicesBuffered(MPI_Comm comm)
+{
+ using namespace Dune;
+
+ // The global grid size
+ const int Nx = 20;
+ const int Ny = 2;
+
+ // Process configuration
+ int procs, rank;
+ MPI_Comm_size(comm, &procs);
+ MPI_Comm_rank(comm, &rank);
+
+ // The local grid
+ int nx = Nx/procs;
+ // distributed indexset
+
+ typedef ParallelIndexSet<int,ParallelLocalIndex<GridFlags> > ParallelIndexSet;
+ ParallelIndexSet sendIndexSet;
+ // global indexset
+ ParallelIndexSet receiveIndexSet;
+
+ Array array, redistributedArray;
+
+ std::vector<int> neighbours;
+
+ // Set up the indexsets.
+ {
+
+ int start = std::max(rank*nx-1,0);
+ int end = std::min((rank + 1) * nx+1, Nx);
+
+ neighbours.reserve(2);
+
+ if(rank>0) neighbours.push_back(rank-1);
+ if(rank<procs-1) neighbours.push_back(rank+1);
+
+ sendIndexSet.beginResize();
+
+
+ array.build(Ny*(end-start));
+
+ for(int j=0, localIndex=0; j<Ny; j++)
+ for(int i=start; i<end; i++, localIndex++) {
+ bool isPublic = (i<=start+1)||(i>=end-2);
+ GridFlags flag = owner;
+
+ if((i==start && i!=0)||(i==end-1 && i!=Nx-1))
+ flag = overlap;
+
+ sendIndexSet.add(i+j*Nx, ParallelLocalIndex<GridFlags> (localIndex,flag,isPublic));
+ array[localIndex]=i+j*Nx; //+rank*Nx*Ny;
+ if(flag==overlap)
+ array[localIndex]=-array[localIndex];
+ }
+
+ sendIndexSet.endResize();
+ }
+ {
+ int newrank = (rank + 1) % procs;
+
+ int start = std::max(newrank*nx-1,0);
+ int end = std::min((newrank + 1) * nx+1, Nx);
+
+ std::cout<<rank<<": "<<newrank<<" start="<<start<<" end"<<end<<std::endl;
+
+ redistributedArray.build(Ny*(end-start));
+
+ receiveIndexSet.beginResize();
+
+ for(int j=0, localIndex=0; j<Ny; j++)
+ for(int i=start; i<end; i++, localIndex++) {
+ bool isPublic = (i<=start+1)||(i>=end-2);
+ GridFlags flag = owner;
+
+ if((i==start && i!=0)||(i==end-1 && i!=Nx-1))
+ flag = overlap;
+
+ receiveIndexSet.add(i+j*Nx, ParallelLocalIndex<GridFlags> (localIndex,flag,isPublic));
+ redistributedArray[localIndex]=-1;
+ }
+
+ receiveIndexSet.endResize();
+ }
+
+
+ std::cout<< rank<<": distributed and global index set!"<<std::endl<<std::flush;
+
+ typedef RemoteIndices<ParallelIndexSet> RemoteIndices;
+ RemoteIndices redistributeIndices(sendIndexSet,
+ receiveIndexSet, comm);
+ RemoteIndices overlapIndices(receiveIndexSet, receiveIndexSet, comm);
+ RemoteIndices sendIndices(sendIndexSet,
+ sendIndexSet, comm, neighbours);
+ RemoteIndices sendIndices1(sendIndexSet,
+ sendIndexSet, comm);
+ overlapIndices.rebuild<false>();
+ redistributeIndices.rebuild<true>();
+ sendIndices.rebuild<true>();
+ sendIndices1.rebuild<true>();
+
+ if(rank==0)
+ std::cout<<sendIndices<<std::endl<<sendIndices1<<std::endl;
+
+ assert(sendIndices==sendIndices1);
+
+ std::cout<<redistributeIndices<<std::endl;
+
+ Interface redistributeInterface, overlapInterface;
+ EnumItem<GridFlags,owner> fowner;
+ EnumItem<GridFlags,overlap> foverlap;
+
+ redistributeInterface.build(redistributeIndices, fowner, fowner);
+ overlapInterface.build(overlapIndices, fowner, foverlap);
+
+ BufferedCommunicator redistribute;
+ BufferedCommunicator overlapComm;
+
+ redistribute.build(array, redistributedArray, redistributeInterface);
+ overlapComm.build<Array>(overlapInterface);
+
+ std::cout<<rank<<": initial array: "<<array<<std::endl;
+
+ redistribute.forward<ArrayGatherScatter>(array, redistributedArray);
+
+ std::cout<<rank<<": redistributed array: "<<redistributedArray<<std::endl;
+
+ redistributedArray +=1;
+
+ std::cout<<rank<<": redistributed array (added one): "<<redistributedArray<<std::endl;
+
+ overlapComm.forward<ArrayGatherScatter>(redistributedArray);
+
+ std::cout<<rank<<": redistributed array with overlap communicated: "<<redistributedArray<<std::endl;
+
+ redistribute.backward<ArrayGatherScatter>(array, redistributedArray);
+
+ std::cout<<rank<<": final array: "<<array<<std::endl;
+
+ redistribute.forward<ArrayGatherScatter>(array, redistributedArray);
+
+ std::cout<<rank<<": final array with overlap communicated: "<<array<<std::endl;
+}
+
+
+/**
+ * @brief MPI Error.
+ * Thrown when an mpi error occurs.
+ */
+class MPIError {
+public:
+ /** @brief Constructor. */
+ MPIError(std::string s, int e) : errorstring(s), errorcode(e){}
+ /** @brief The error string. */
+ std::string errorstring;
+ /** @brief The mpi error code. */
+ int errorcode;
+};
+
+void MPI_err_handler([[maybe_unused]] MPI_Comm *comm, int *err_code, ...){
+ char *err_string=new char[MPI_MAX_ERROR_STRING];
+ int err_length;
+ MPI_Error_string(*err_code, err_string, &err_length);
+ std::string s(err_string, err_length);
+ int rank;
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ std::cerr << rank<<": An MPI Error occurred:"<<std::endl<<s<<std::endl;
+ delete[] err_string;
+ throw MPIError(s, *err_code);
+}
+
+int main(int argc, char **argv)
+{
+ MPI_Init(&argc, &argv);
+ MPI_Errhandler handler;
+ MPI_Comm_create_errhandler(MPI_err_handler, &handler);
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, handler);
+ int rank;
+ int size;
+ const int firstRank=2;
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ MPI_Comm comm;
+ int key = rank;
+
+ if(size>firstRank) {
+ if(rank==0)
+ key = firstRank;
+ if(rank==firstRank)
+ key=0;
+ }
+
+ MPI_Comm_split(MPI_COMM_WORLD, 0, key, &comm);
+
+#ifdef DEBUG
+  volatile bool wait=true; // spin here so a debugger can attach and set wait to false
+ while(size>1 && wait) ;
+#endif
+
+ // testIndices(comm);
+ testIndicesBuffered(comm);
+
+ if(rank==0)
+    std::cout<<std::endl<<"Redistributing!"<<std::endl<<std::endl;
+ MPI_Barrier(comm);
+
+
+ // testRedistributeIndices(comm);
+ testRedistributeIndicesBuffered(comm);
+ MPI_Comm_free(&comm);
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include "config.h"
+
+#include <iostream>
+
+#include <dune/common/enumset.hh>
+#include <dune/common/parallel/indexset.hh>
+#include <dune/common/parallel/plocalindex.hh>
+#include <dune/common/parallel/selection.hh>
+#include <dune/common/timer.hh>
+
+enum GridFlags {
+ owner, overlap, border
+};
+
+template<class T>
+int measure(const T& selection)
+{
+  /*
+     return measure<1>(selection);
+     }
+
+     template<int LOOPS, class T>
+     int measure(const T& selection)
+     {*/
+ typedef typename T::const_iterator iterator;
+
+ const iterator end = selection.end();
+
+ int count=0;
+ Dune::Timer timer;
+ timer.reset();
+ for(int i=0; i<10; i++)
+ for(iterator iter = selection.begin(); iter != end; ++iter)
+ count+=*iter;
+
+ std::cout<<" took "<< timer.elapsed()<<" seconds"<<std::endl;
+
+ return count;
+}
+
+template<int SIZE>
+void test()
+{
+ const int Nx = SIZE;
+ const int Ny = SIZE;
+
+ // Process configuration
+ const int ALSIZE=55;
+
+ Dune::ParallelIndexSet<int,Dune::ParallelLocalIndex<GridFlags>,ALSIZE> distIndexSet;
+
+ distIndexSet.beginResize();
+
+ for(int y=0, i=0; y < Ny; y++)
+ for(int x=0; x < Nx; x++, i++) {
+ GridFlags flag = owner;
+ if(x==0 || x == Nx-1 || y ==0 || y==Ny-1)
+ flag = overlap;
+
+ distIndexSet.add(i, Dune::ParallelLocalIndex<GridFlags> (i, flag, true));
+ }
+
+ distIndexSet.endResize();
+
+ Dune::UncachedSelection<Dune::EnumItem<GridFlags,owner>,int,Dune::ParallelLocalIndex<GridFlags>,ALSIZE>
+ ownerUncached(distIndexSet);
+
+ Dune::Selection<Dune::EnumItem<GridFlags,owner>,int,Dune::ParallelLocalIndex<GridFlags>,ALSIZE>
+ ownerCached(distIndexSet);
+
+ Dune::UncachedSelection<Dune::EnumItem<GridFlags,overlap>,int,Dune::ParallelLocalIndex<GridFlags>,ALSIZE>
+ overlapUncached(distIndexSet);
+
+ Dune::Selection<Dune::EnumItem<GridFlags,overlap>,int,Dune::ParallelLocalIndex<GridFlags>,ALSIZE>
+ overlapCached(distIndexSet);
+
+ int count=0;
+
+ std::cout<<" Owner selection uncached:";
+ count+=meassure(ownerUncached);
+ std::cout<<" Owner selection cached:";
+ count+=meassure(ownerCached);
+ std::cout<<" Overlap selection uncached:";
+ count+=meassure(overlapUncached);
+ std::cout<<" Overlap selection cached:";
+ count+=meassure(overlapCached);
+ std::cout<<count<<std::endl;
+}
+
+int main()
+{
+ test<1000>();
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include "config.h"
+
+#include <dune/common/parallel/indicessyncer.hh>
+#include <dune/common/sllist.hh>
+#include <string>
+#include <tuple>
+#include <iostream>
+
+enum GridFlags {
+ owner, overlap, border
+};
+
+template<typename T>
+void deleteOverlapEntries(T& indices,
+ Dune::RemoteIndices<T>& remoteIndices)
+{
+ typedef typename T::iterator IndexIterator;
+ typedef typename T::GlobalIndex GlobalIndex;
+ typedef typename T::LocalIndex::Attribute Attribute;
+ typedef Dune::RemoteIndices<T> RemoteIndices;
+ typedef typename RemoteIndices::RemoteIndexList::ModifyIterator RemoteModifier;
+ typedef typename RemoteIndices::RemoteIndexList::const_iterator RemoteIterator;
+ typedef Dune::SLList<std::pair<GlobalIndex,Attribute>, typename RemoteIndices::RemoteIndexList::Allocator> GlobalList;
+ typedef typename GlobalList::ModifyIterator GlobalModifier;
+ typedef std::tuple<RemoteModifier,GlobalModifier,const RemoteIterator,const typename GlobalList::const_iterator,
+ const GlobalList*, const typename RemoteIndices::RemoteIndexList*> IteratorTuple;
+ typedef std::map<int,IteratorTuple> IteratorMap;
+ typedef typename RemoteIndices::const_iterator RemoteMapIterator;
+
+ int rank;
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ std::map<int,GlobalList> globalLists;
+
+ IteratorMap iterators;
+ RemoteMapIterator rmEnd = remoteIndices.end();
+
+ for(RemoteMapIterator remote = remoteIndices.begin();
+ remote != rmEnd; ++remote) {
+ // Initialize global indices
+ GlobalList& gList=globalLists[remote->first];
+ const RemoteIterator rend = remote->second.first->end();
+
+ for(RemoteIterator index= remote->second.first->begin();
+ index != rend; ++index)
+ gList.push_back(std::make_pair(index->localIndexPair().global(),
+ index->localIndexPair().local().attribute()));
+
+ assert(gList.size()==remote->second.first->size());
+ std::cout << "Size of remote indices is "<<gList.size()<<std::endl;
+
+ iterators.insert(std::make_pair(remote->first,
+ IteratorTuple(remote->second.first->beginModify(),
+ gList.beginModify(),
+ rend,
+ gList.end(),
+ &gList,
+ remote->second.first)));
+ }
+
+ indices.beginResize();
+
+ const IndexIterator endIndex = indices.end();
+ for(IndexIterator index = indices.begin(); index != endIndex; ++index) {
+ if(index->local().attribute()==overlap) {
+ std::cout << rank<<": Deleting "<<*index<<std::endl;
+
+ indices.markAsDeleted(index);
+
+ // Delete corresponding indices in all remote index lists.
+ typedef typename IteratorMap::iterator iterator;
+ iterator end = iterators.end();
+
+ for(iterator remote = iterators.begin(); remote != end; ++remote) {
+
+ // Search for the index
+ while(std::get<0>(remote->second) != std::get<2>(remote->second)
+ && *(std::get<1>(remote->second)) < *index) {
+ // increment all iterators
+ ++(std::get<0>(remote->second));
+ ++(std::get<1>(remote->second));
+ if(std::get<0>(remote->second)!=std::get<2>(remote->second))
+ assert(std::get<1>(remote->second)!=std::get<3>(remote->second));
+ }
+
+ // Delete the entry if present
+ if(std::get<0>(remote->second) != std::get<2>(remote->second)) {
+ assert(std::get<1>(remote->second) != std::get<3>(remote->second));
+
+ if(*(std::get<1>(remote->second)) == *index) {
+
+ std::cout<<rank<<": Deleting remote "<<
+ std::get<1>(remote->second)->first<<", "<<
+ std::get<1>(remote->second)->second<<" of process "
+ << remote->first<<std::endl;
+
+ // Delete entries
+ std::get<0>(remote->second).remove();
+ std::get<1>(remote->second).remove();
+ assert(std::get<4>(remote->second)->size()==std::get<5>(remote->second)->size());
+ }
+ }
+ }
+ }
+ }
+
+ indices.endResize();
+
+ // Update the pointers to the local index pairs
+ Dune::repairLocalIndexPointers(globalLists, remoteIndices, indices);
+ globalLists.clear();
+}
+
+
+template<typename T>
+bool areEqual(T& indices,
+ Dune::RemoteIndices<T>& remoteIndices,
+ T& oIndices,
+ Dune::RemoteIndices<T>& oRemoteIndices){
+
+ typedef typename T::iterator IndexIterator;
+ typedef Dune::RemoteIndices<T> RemoteIndices;
+ typedef typename RemoteIndices::RemoteIndexList::iterator RemoteIterator;
+
+ IndexIterator iEnd = indices.end();
+ bool ret=true;
+ int rank;
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ // Test the index sets
+ if(indices.size() != oIndices.size()) {
+ std::cerr<< rank<<": Size of index set is unequal!"<<std::endl;
+ ret= false;
+ }
+ for(IndexIterator index = indices.begin(), oIndex = oIndices.begin();
+ index != iEnd; ++index, ++oIndex) {
+ if( index->global() != oIndex->global()) {
+ std::cerr<<rank<<": Entry for "<<index->global() <<" is missing!"<<std::endl;
+ ret = false;
+ }
+ else if(index->local().attribute() !=oIndex->local().attribute()) {
+ std::cerr<<rank<<": Entry for "<<index->global() <<" has wrong attribute: "<<
+ index->local().attribute()<< "!= "<<oIndex->local().attribute()<<std::endl;
+ ret = false;
+ }
+ }
+
+ // Test the remote index lists
+ typedef typename RemoteIndices::RemoteIndexMap::const_iterator RemoteMapIterator;
+
+ RemoteMapIterator rmEnd = remoteIndices.end();
+
+ for(RemoteMapIterator remote = remoteIndices.begin(),
+ oRemote = oRemoteIndices.begin();
+ remote != rmEnd; ++remote, ++oRemote) {
+ if(oRemote->second.first->size() != remote->second.first->size()) {
+ std::cerr <<rank<<": Size of remote index list for process "<<remote->first
+ <<" does not match!"<<std::endl;
+ ret=false;
+ }
+
+ RemoteIterator rEnd = oRemote->second.first->end();
+ for(RemoteIterator rIndex= remote->second.first->begin(),
+ oRIndex = oRemote->second.first->begin(); oRIndex != rEnd;
+ ++rIndex, ++oRIndex) {
+
+ if(rIndex->localIndexPair().global() != oRIndex->localIndexPair().global()) {
+
+ std::cerr<<rank<<": Remote Entry for "<< rIndex->localIndexPair().global()
+ <<" is missing for process "<<remote->first<<std::endl;
+ ret = false;
+ }
+
+ if(rIndex->attribute() != oRIndex->attribute()) {
+ std::cerr<<rank<<": Attribute for entry "<<rIndex->localIndexPair().global()
+ <<" for process "<< remote->first<<" is wrong: "
+ <<rIndex->attribute()<<" != "<<oRIndex->attribute()<<std::endl;
+ ret = false;
+ }
+ }
+ }
+
+ return ret;
+}
+
+template<typename T>
+void addFakeRemoteIndices(T& indices,
+ T& oIndices,
+ Dune::RemoteIndices<T>& remoteIndices,
+ Dune::RemoteIndices<T>& oRemoteIndices){
+ typedef typename T::iterator IndexIterator;
+ typedef typename T::GlobalIndex GlobalIndex;
+ typedef typename T::LocalIndex::Attribute Attribute;
+ typedef typename Dune::RemoteIndices<T>::RemoteIndexList RemoteIndexList;
+ assert(remoteIndices.neighbours()==0 && oRemoteIndices.neighbours()==0);
+
+ RemoteIndexList* rlist = new RemoteIndexList();
+ RemoteIndexList* orlist = new RemoteIndexList();
+ int added=0;
+ IndexIterator iEnd = indices.end();
+
+ for(IndexIterator index = indices.begin(), oIndex = oIndices.begin();
+ index != iEnd; ++index, ++oIndex) {
+ assert(*index == *oIndex);
+ if(index->local().attribute()==overlap) {
+ added++;
+ rlist->push_back(Dune::RemoteIndex<GlobalIndex,Attribute>(owner,&(*index)));
+ orlist->push_back(Dune::RemoteIndex<GlobalIndex,Attribute>(owner,&(*oIndex)));
+ }
+ }
+
+
+ remoteIndices.remoteIndices_.insert(std::make_pair(1,std::make_pair(rlist,rlist)));
+ oRemoteIndices.remoteIndices_.insert(std::make_pair(1,std::make_pair(orlist,orlist)));
+
+ std::cout<<"Added "<<added<<" fake remote indices!"<<std::endl;
+}
+
+bool testIndicesSyncer()
+{
+ //using namespace Dune;
+
+ // The global grid size
+ const int Nx = 6;
+ const int Ny = 1;
+
+ // Process configuration
+ int procs, rank;
+ MPI_Comm_size(MPI_COMM_WORLD, &procs);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ // The local grid
+ int nx = Nx/procs;
+ int first=Nx%procs;
+ // distributed indexset
+ // typedef ParallelLocalIndex<GridFlags> LocalIndexType;
+
+ typedef Dune::ParallelIndexSet<int,Dune::ParallelLocalIndex<GridFlags> > ParallelIndexSet;
+ ParallelIndexSet indexSet, changedIndexSet;
+
+ // Set up the indexsets.
+ int start,end, ostart, oend;
+ if(rank<first) {
+ start = rank*nx+rank;
+ end = rank +rank * nx+nx+1;
+ }else{
+ start = first+rank*nx;
+ end = first +rank*nx +nx;
+ }
+
+ if(rank>0 &&start<Nx)
+ ostart=start-1;
+ else
+ ostart=start;
+
+ if(rank<procs-1 &&end<Nx)
+ oend=end+1;
+ else
+ oend=end;
+
+ std::cout<<rank<<": ostart="<<ostart<<" start="<<start<<" end="<<end<<" oend="<<oend<<std::endl;
+ //return true;
+
+ indexSet.beginResize();
+ changedIndexSet.beginResize();
+
+ int localIndex=0;
+
+ for(int j=0; j<Ny; j++)
+ for(int i=ostart; i<oend; i++) {
+ bool isPublic = (i<start+1)||(i>=end-1);
+ GridFlags flag = owner;
+ if((i==ostart && (i!=0))||(i==end && (i!=Nx-1))) {
+ flag = overlap;
+ }
+
+ indexSet.add(i+j*Nx, Dune::ParallelLocalIndex<GridFlags> (localIndex,flag,isPublic));
+ changedIndexSet.add(i+j*Nx, Dune::ParallelLocalIndex<GridFlags> (localIndex++,flag,isPublic));
+ }
+
+ indexSet.endResize();
+ changedIndexSet.endResize();
+
+ Dune::RemoteIndices<ParallelIndexSet> remoteIndices(indexSet, indexSet, MPI_COMM_WORLD);
+ Dune::RemoteIndices<ParallelIndexSet> changedRemoteIndices(changedIndexSet, changedIndexSet, MPI_COMM_WORLD);
+
+ remoteIndices.rebuild<false>();
+ changedRemoteIndices.rebuild<false>();
+
+
+ std::cout<<rank<<": Unchanged: "<<indexSet<<std::endl<<remoteIndices<<std::endl;
+ assert(areEqual(indexSet, remoteIndices,changedIndexSet, changedRemoteIndices));
+
+ std::cout<<"Deleting entries!"<<std::endl;
+
+ //if(procs==1)
+ //addFakeRemoteIndices(indexSet, changedIndexSet, remoteIndices, changedRemoteIndices);
+
+ deleteOverlapEntries(changedIndexSet, changedRemoteIndices);
+ std::cout<<rank<<": Changed: "<<changedIndexSet<<std::endl<<changedRemoteIndices<<std::endl;
+
+ Dune::IndicesSyncer<ParallelIndexSet> syncer(changedIndexSet, changedRemoteIndices);
+ // return 0;
+
+ std::cout<<"Syncing!"<<std::endl;
+
+ syncer.sync();
+
+ std::cout<<rank<<": Synced: "<<changedIndexSet<<std::endl<<changedRemoteIndices<<std::endl;
+ if( areEqual(indexSet, remoteIndices,changedIndexSet, changedRemoteIndices))
+ return true;
+ else{
+ std::cerr<<"Output not equal!"<<std::endl;
+ return false;
+ }
+
+
+}
+
+/**
+ * @brief MPI Error.
+ * Thrown when an mpi error occurs.
+ */
+class MPIError {
+public:
+ /** @brief Constructor. */
+ MPIError(std::string s, int e) : errorstring(s), errorcode(e){}
+ /** @brief The error string. */
+ std::string errorstring;
+ /** @brief The mpi error code. */
+ int errorcode;
+};
+
+void MPI_err_handler([[maybe_unused]] MPI_Comm *comm, int *err_code, ...)
+{
+ char *err_string=new char[MPI_MAX_ERROR_STRING];
+ int err_length;
+ MPI_Error_string(*err_code, err_string, &err_length);
+ std::string s(err_string, err_length);
+ std::cerr << "An MPI Error occurred:"<<std::endl<<s<<std::endl;
+ delete[] err_string;
+ throw MPIError(s, *err_code);
+}
+
+int main(int argc, char** argv){
+ MPI_Init(&argc, &argv);
+ MPI_Errhandler handler;
+ MPI_Comm_create_errhandler(MPI_err_handler, &handler);
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, handler);
+ int procs, rank;
+ MPI_Comm_size(MPI_COMM_WORLD, &procs);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ bool ret=testIndicesSyncer();
+ MPI_Barrier(MPI_COMM_WORLD);
+ std::cout<<rank<<": ENd="<<ret<<std::endl;
+ if(!ret)
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+#include <config.h>
+
+#include <cstddef>
+#include <iostream>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include <mpi.h>
+
+#include <dune/common/parallel/interface.hh>
+#include <dune/common/parallel/variablesizecommunicator.hh>
+
+// For each communicated index, convert the index to a `double` and send it
+// three times on the sending side. On the receiving side, print and verify
+// the received data.
+struct MyDataHandle
+{
+ std::set<int> dataSendAt;
+ std::set<int> dataRecievedAt;
+
+ MyDataHandle(int r)
+ : rank(r)
+ {
+ }
+ int rank;
+
+ typedef double DataType;
+
+ bool fixedSize()
+ {
+ return true;
+ }
+ void verify(int procs, int start, int end) {
+ std::vector<int> indices;
+
+ if(procs==1) {
+ for(int k=0;k<=10;k+=2) {
+ indices.push_back(k);
+ }
+ }
+ else {
+ if(rank && rank < procs) {
+ indices.push_back(start-1);
+ indices.push_back(start);
+ }
+ if(rank < procs-1) {
+ indices.push_back(end-1);
+ indices.push_back(end);
+ }
+ }
+
+ std::set<int>::iterator it;
+ for(int idx : indices) {
+ it = dataSendAt.find(idx);
+ if(it == dataSendAt.end()) {
+ std::cerr << rank << ": No data send at index " << idx << "!" << std::endl;
+ std::abort();
+ }
+ dataSendAt.erase(it);
+
+ it = dataRecievedAt.find(idx);
+ if(it == dataRecievedAt.end()) {
+ std::cerr << rank << ": No data recieved at index " << idx << "!" << std::endl;
+ std::abort();
+ }
+ dataRecievedAt.erase(it);
+ }
+ for(const int &i : dataSendAt) {
+ std::cerr << rank << ": Unexpected data send at index " << i << "!" << std::endl;
+ std::abort();
+ }
+ for(const int &i : dataRecievedAt) {
+ std::cerr << rank << ": Unexpected data recieved at index " << i << "!" << std::endl;
+ std::abort();
+ }
+ }
+ template<class B>
+ void gather(B& buffer, int i)
+ {
+ if(!dataSendAt.insert(i).second) {
+ std::cerr << rank << ": Gather() was called twice for index " << i << "!" << std::endl;
+ std::abort();
+ }
+
+ std::cout<<rank<<": Gathering "<<i<<std::endl;
+ double d=i;
+ buffer.write(d);
+ buffer.write(d);
+ buffer.write(d);
+ }
+ template<class B>
+ void scatter(B& buffer, int i, int size)
+ {
+ if(!dataRecievedAt.insert(i).second) {
+ std::cerr << rank << ": Scatter() was called twice for index " << i << "!" << std::endl;
+ std::abort();
+ }
+
+ std::cout<<rank<<": Scattering "<<size<<" entries for "<<i<<": ";
+ if(size != 3) {
+ std::cerr << "\n" << rank <<": Number of communicated entries does not match!" << std::endl;
+ std::abort();
+ }
+
+ for(;size>0;--size)
+ {
+ double index;
+ buffer.read(index);
+ std::cout<<index<<" ";
+ if(i != index) {
+ std::cerr << "\n" << rank << ": Communicated value does not match!" << std::endl;
+ std::abort();
+ }
+ }
+ std::cout<<std::endl;
+ }
+ std::size_t size([[maybe_unused]] int i)
+ {
+ return 3;
+ }
+};
+
+struct MyDataHandle1D : public MyDataHandle
+{
+ MyDataHandle1D(int r) : MyDataHandle(r) {}
+
+ template<class B>
+ void scatter(B& buffer, int i, int size)
+ {
+ if(!dataRecievedAt.insert(i).second) {
+ std::cerr << rank << ": Scatter() was called twice for index " << i << "!" << std::endl;
+ std::abort();
+ }
+
+ std::cout<<rank<<": Scattering "<<size<<" entries for "<<i<<": ";
+ if(size != 3) {
+ std::cerr << "\n" << rank <<": Number of communicated entries does not match!" << std::endl;
+ std::abort();
+ }
+
+ for(;size>0;--size)
+ {
+ double index;
+ buffer.read(index);
+ std::cout<<index<<" ";
+ if(10-i != index) {
+ std::cerr << "\n" << rank << ": Communicated value does not match!" << std::endl;
+ std::abort();
+ }
+ }
+ std::cout<<std::endl;
+ }
+};
+
+// On the sending side, for each index to send, send between 0 and 4 numbers
+// (precisely: `index % 5` numbers). The first number is the index converted
+// to `double`, incrementing by one for each consecutive number. On the
+// receiving side just print the received numbers.
+struct VarDataHandle
+{
+ std::set<int> dataSendAt;
+ std::set<int> dataRecievedAt;
+
+ VarDataHandle(int r)
+ : rank(r)
+ {}
+ int rank;
+ typedef double DataType;
+ bool fixedSize()
+ {
+ return false;
+ }
+ void verify(int procs, int start, int end) {
+ std::vector<int> indices;
+ if(procs==1) {
+ for(int k=0;k<=10;k+=2) {
+ indices.push_back(k);
+ }
+ }
+ else {
+ if(rank && rank < procs) {
+ indices.push_back(start-1);
+ indices.push_back(start);
+ }
+ if(rank < procs-1) {
+ indices.push_back(end-1);
+ indices.push_back(end);
+ }
+ }
+
+ std::set<int>::iterator it;
+ for(int idx : indices) {
+ it = dataSendAt.find(idx);
+ if(it == dataSendAt.end()) {
+ std::cerr << rank << ": No data send at index " << idx << "!" << std::endl;
+ std::abort();
+ }
+ dataSendAt.erase(it);
+
+ it = dataRecievedAt.find(idx);
+ if(it == dataRecievedAt.end() && idx%5) {
+ std::cerr << rank << ": No data recieved at index " << idx << "!" << std::endl;
+ std::abort();
+ }
+ else if(it != dataRecievedAt.end()) {
+ dataRecievedAt.erase(it);
+ }
+ }
+ for(const int &i : dataSendAt) {
+ std::cerr << rank << ": Unexpected data send at index " << i << "!" << std::endl;
+ std::abort();
+ }
+ for(const int &i : dataRecievedAt) {
+ std::cerr << rank << ": Unexpected data recieved at index " << i << "!" << std::endl;
+ std::abort();
+ }
+ }
+ template<class B>
+ void gather(B& buffer, int i)
+ {
+ if(!dataSendAt.insert(i).second) {
+ std::cerr << rank << ": Gather() was called twice for index " << i << "!" << std::endl;
+ std::abort();
+ }
+
+ std::size_t s=i%5;
+ std::cout<<rank<<": Gathering "<<s<<" entries for index "<<i<<std::endl;
+ for(std::size_t j=0; j<s; j++)
+ buffer.write(static_cast<double>(i+j));
+ }
+ template<class B>
+ void scatter(B& buffer, int i, int size)
+ {
+ if(!dataRecievedAt.insert(i).second) {
+ std::cerr << rank << ": Scatter() was called twice for index " << i << "!" << std::endl;
+ std::abort();
+ }
+
+ std::cout<<rank<<": Scattering "<<size<<" entries for "<<i<<": ";
+ if(size != i%5) {
+ std::cerr << "\n" << rank <<": Number of communicated entries does not match!" << std::endl;
+ std::abort();
+ }
+
+ for(int k=0; k<size; k++)
+ {
+ double index;
+ buffer.read(index);
+ std::cout<<index<<" ";
+ if(index != i+k) {
+ std::cerr << "\n" << rank << ": Communicated value does not match!" << std::endl;
+ std::abort();
+ }
+ }
+ std::cout<<std::endl;
+ }
+ std::size_t size(int i)
+ {
+ return i%5;
+ }
+
+};
+
+struct VarDataHandle1D : public VarDataHandle
+{
+ VarDataHandle1D(int r) : VarDataHandle(r) {}
+
+ template<class B>
+ void scatter(B& buffer, int i, int size)
+ {
+ if(!dataRecievedAt.insert(i).second) {
+ std::cerr << rank << ": Scatter() was called twice for index " << i << "!" << std::endl;
+ std::abort();
+ }
+
+ std::cout<<rank<<": Scattering "<<size<<" entries for "<<i<<": ";
+ if(size != (10-i)%5) {
+ std::cerr << "\n" << rank <<": Number of communicated entries does not match!" << std::endl;
+ std::abort();
+ }
+
+ for(int k=0; k<size; k++)
+ {
+ double index;
+ buffer.read(index);
+ std::cout<<index<<" ";
+ if(index != 10-i+k) {
+ std::cerr << "\n" << rank << ": Communicated value does not match!" << std::endl;
+ std::abort();
+ }
+ }
+ std::cout<<std::endl;
+ }
+};
+
+int main(int argc, char** argv)
+{
+ MPI_Init(&argc, &argv);
+ int procs, rank;
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &procs);
+ if(procs==1)
+ {
+ // Invent a consecutive index set with 11 indices [0, 10]. Set every
+        // even index to communicate with ourselves. Then use the data handles
+ // defined at the top of this file to do some test communications.
+ typedef Dune::VariableSizeCommunicator<>::InterfaceMap Interface;
+ Dune::InterfaceInformation send, recv;
+ send.reserve(6);
+ for(std::size_t i=0; i<=10; i+=2)
+ send.add(i);
+ recv.reserve(6);
+        // the receive indices are added in reverse order, so send index i is
+        // paired with receive index 10-i (which is what the *1D data handles
+        // check for); the unsigned wrap-around after i==0 ends the loop
+        for(std::size_t i=10; i<=10; i-=2)
+ recv.add(i);
+ Interface inf;
+ inf[0]=std::make_pair(send, recv);
+ Dune::VariableSizeCommunicator<> comm(MPI_COMM_SELF, inf, 6);
+ MyDataHandle1D handle(0);
+ comm.forward(handle);
+ handle.verify(procs, 0, 0);
+ std::cout<<"===================== backward ========================="<<std::endl;
+ comm.backward(handle);
+ handle.verify(procs, 0, 0);
+ std::cout<<"================== variable size ======================="<<std::endl;
+ VarDataHandle1D vhandle(0);
+ comm.forward(vhandle);
+ vhandle.verify(procs, 0, 0);
+ std::cout<<"===================== backward ========================="<<std::endl;
+ comm.backward(vhandle);
+ vhandle.verify(procs, 0, 0);
+ }
+ else
+ {
+ // We also want to check the case where the interface is empty on some
+        // processes. Therefore we artificially lower the number of processes
+ // if it is larger than two. Thus the last rank will not send anything
+ // and we check for deadlocks.
+ if(procs>2)
+ --procs;
+
+ // Partition a consecutive set of indices among all active ranks
+ // (where the final rank possibly excluded above is considered
+ // inactive). Set up interfaces so each rank communicates with its
+ // predecessors at the two indices next to the common partition
+ // boundary, and likewise for the successor. Then use the data
+ // handles defined at the top of this file to do some test
+ // communications.
+ int N=100000; // number of indices
+ int num_per_proc=N/procs;
+ // start is our first index, end is one-past our last index.
+ int start, end;
+ if(rank<N%procs)
+ {
+ // if the #active ranks does not divide #indices, lower ranks get
+ // an additional index in their range
+ start=rank*(num_per_proc+1);
+ end=(rank+1)*(num_per_proc+1);
+ }
+ else
+ {
+ start=(N%procs)+rank*(num_per_proc);
+ end=start+num_per_proc;
+ }
+ // sanity check
+ if(rank==procs-1)
+ assert(N==end);
+ typedef Dune::VariableSizeCommunicator<>::InterfaceMap Interface;
+ Interface inf;
+ if(rank && rank<procs) // rank==procs might hold and produce a deadlock otherwise!
+ {
+ // left interface: communicate our first index and our
+ // predecessor's last index with our predecessor
+ Dune::InterfaceInformation send, recv;
+ send.reserve(2);
+ recv.reserve(2);
+ send.add(start-1);
+ send.add(start);
+ recv.add(start-1);
+ recv.add(start);
+ inf[rank-1]=std::make_pair(send, recv);
+ }
+ if(rank<procs-1)
+ {
+ // right interface: communicate our last index and our successor's
+ // first index with our successor
+ Dune::InterfaceInformation send, recv;
+ send.reserve(2);
+ recv.reserve(2);
+ send.add(end-1);
+ send.add(end);
+ recv.add(end-1);
+ recv.add(end);
+ inf[rank+1]=std::make_pair(send, recv);
+ }
+ // report inactive rank
+ if(rank==procs)
+ std::cout<<" rank "<<rank<<" has empty interface "<<inf.size()<<std::endl;
+
+ Dune::VariableSizeCommunicator<> comm(MPI_COMM_WORLD, inf, 6);
+ MyDataHandle handle(rank);
+ comm.forward(handle);
+ MPI_Barrier(MPI_COMM_WORLD);
+ handle.verify(procs, start, end);
+ MPI_Barrier(MPI_COMM_WORLD);
+ if(rank==0)
+ std::cout<<"===================== backward ========================="<<std::endl;
+ MPI_Barrier(MPI_COMM_WORLD);
+ comm.backward(handle);
+ MPI_Barrier(MPI_COMM_WORLD);
+ handle.verify(procs, start, end);
+ MPI_Barrier(MPI_COMM_WORLD);
+ if(rank==0)
+ std::cout<<"================== variable size ======================="<<std::endl;
+ MPI_Barrier(MPI_COMM_WORLD);
+ VarDataHandle vhandle(rank);
+ MPI_Barrier(MPI_COMM_WORLD);
+ comm.forward(vhandle);
+ MPI_Barrier(MPI_COMM_WORLD);
+ vhandle.verify(procs, start, end);
+ MPI_Barrier(MPI_COMM_WORLD);
+ if(rank==0)
+ std::cout<<"===================== backward ========================="<<std::endl;
+ MPI_Barrier(MPI_COMM_WORLD);
+ comm.backward(vhandle);
+ MPI_Barrier(MPI_COMM_WORLD);
+ vhandle.verify(procs, start, end);
+ }
+
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_PARALLEL_VARIABLESIZECOMMUNICATOR_HH // Still fits the line!
+#define DUNE_COMMON_PARALLEL_VARIABLESIZECOMMUNICATOR_HH
+
+#if HAVE_MPI
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <functional>
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include <mpi.h>
+
+#include <dune/common/concept.hh>
+#include <dune/common/parallel/interface.hh>
+#include <dune/common/parallel/mpitraits.hh>
+
+/**
+ * @addtogroup Common_Parallel
+ *
+ * @{
+ */
+/**
+ * @file
+ * @brief A communicator that only needs to know the number of elements per
+ * index at the sender side.
+ * @author Markus Blatt
+ * @}
+ */
+namespace Dune
+{
+
+namespace Concept {
+
+struct HasFixedSize {
+ template <typename H> auto require(H &&h) -> decltype(h.fixedSize());
+};
+
+} // namespace Concept
+
+namespace Impl {
+
+template <typename H,
+ std::enable_if_t<models<Concept::HasFixedSize, H>(), int> = 0>
+constexpr bool callFixedSize(H &&handle) {
+ return handle.fixedSize();
+}
+
+template <typename H,
+ std::enable_if_t<not models<Concept::HasFixedSize, H>(), int> = 0>
+[[deprecated("Using handles with fixedsize() (lower case s) is deprecated and "
+ "will be removed after release 2.8. Implement fixedSize() "
+ "(camelCase) instead!")]]
+constexpr bool callFixedSize(H &&handle) {
+ return handle.fixedsize();
+}
+
+} // namespace Impl
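+
+// A minimal sketch of how the dispatch above is meant to be used (the handle
+// type `ExampleHandle` is hypothetical and only for illustration):
+//
+//   struct ExampleHandle {
+//     bool fixedSize() { return true; } // camelCase method models Concept::HasFixedSize
+//   };
+//
+//   // selects the first overload, no deprecation warning is emitted:
+//   bool fixed = Impl::callFixedSize(ExampleHandle{});
+//
+// A handle that only provides the old fixedsize() method would instead pick
+// the deprecated overload above and trigger the deprecation warning.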
+
+namespace
+{
+/**
+ * @brief A message buffer.
+ * @tparam T The type of data that the buffer will hold.
+ */
+template<class T, class Allocator=std::allocator<T> >
+class MessageBuffer
+{
+public:
+ /**
+ * @brief Constructs a message.
+ * @param size The number of elements that buffer should hold,
+ */
+ explicit MessageBuffer(int size)
+ : buffer_(new T[size]), size_(size), position_(0)
+ {}
+ /**
+ * @brief Copy constructor.
+ * @param o The instance to copy.
+ */
+ explicit MessageBuffer(const MessageBuffer& o)
+ : buffer_(new T[o.size_]), size_(o.size_), position_(o.position_)
+ {
+ }
+ /** @brief Destructor. */
+ ~MessageBuffer()
+ {
+ delete[] buffer_;
+ }
+ /**
+ * @brief Write an item to the buffer.
+ * @param data The data item to write.
+ */
+ void write(const T& data)
+ {
+ buffer_[position_++]=data;
+ }
+
+ /**
+ * @brief Reads a data item from the buffer
+ * @param[out] data Reference to where to store the read data.
+ */
+ void read(T& data)
+ {
+ data=buffer_[position_++];
+ }
+
+ /**
+ * @brief Reset the buffer.
+ *
+ * On return the buffer will be positioned at the start again.
+ */
+ void reset()
+ {
+ position_=0;
+ }
+
+ /**
+ * @brief Test whether the whole buffer was read.
+   * @return True if we read or wrote until the end of the buffer.
+ */
+ bool finished()
+ {
+ return position_==size_;
+ }
+
+ /**
+ * @brief Tests whether the buffer has enough space left to read/write data.
+   * @param noItems The number of items to read or write.
+ * @return True if there is enough space for noItems items.
+ */
+ bool hasSpaceForItems(int noItems)
+ {
+ return position_+noItems<=size_;
+ }
+ /**
+ * @brief Get the size of the buffer.
+ * @return The number of elements the buffer can hold.
+ */
+ std::size_t size() const
+ {
+ return size_;
+ }
+ /**
+ * @brief Converts the buffer to a C array.
+ * @return The underlying C array.
+ */
+ operator T*()
+ {
+ return buffer_;
+ }
+
+private:
+ /**
+   * @brief Pointer to the beginning of the buffer.
+ */
+ T* buffer_;
+ /**
+ * @brief The size of the buffer
+ */
+ std::size_t size_;
+ /**
+ * @brief The current position in the buffer.
+ */
+ std::size_t position_;
+};
+
+/**
+ * @brief A tracker for the current position in a communication interface.
+ */
+class InterfaceTracker
+{
+public:
+ /**
+ * @brief Constructor.
+ * @param rank The other rank that the interface communicates with.
+ * @param info A list of local indices belonging to this interface.
+ */
+ InterfaceTracker(int rank, InterfaceInformation info, std::size_t fixedsize=0,
+ bool allocateSizes=false)
+ : fixedSize(fixedsize),rank_(rank), index_(), interface_(info), sizes_()
+ {
+ if(allocateSizes)
+ {
+ sizes_.resize(info.size());
+ }
+ }
+
+ /**
+ * @brief Moves to the next index in the interface.
+ */
+ void moveToNextIndex()
+ {
+ index_++;
+ assert(index_<=interface_.size());
+ skipZeroIndices();
+ }
+ /**
+ * @brief Increment index various times.
+ * @param i The number of times to increment.
+ */
+ void increment(std::size_t i)
+ {
+ index_+=i;
+ assert(index_<=interface_.size());
+ }
+ /**
+ * @brief Checks whether all indices have been visited.
+ * @return True if all indices have been visited.
+ */
+ bool finished() const
+ {
+ return index_==interface_.size();
+ }
+
+ void skipZeroIndices()
+ {
+ // skip indices with size zero!
+ while(sizes_.size() && index_!=interface_.size() &&!size())
+ ++index_;
+ }
+
+ /**
+ * @brief Get the current local index of the interface.
+ * @return The current local index of the interface.
+ */
+ std::size_t index() const
+ {
+ return interface_[index_];
+ }
+ /**
+ * @brief Get the size at the current index.
+ */
+ std::size_t size() const
+ {
+ assert(sizes_.size());
+ return sizes_[index_];
+ }
+ /**
+ * @brief Get a pointer to the array with the sizes.
+ */
+ std::size_t* getSizesPointer()
+ {
+ return &sizes_[0];
+ }
+ /**
+ * @brief Returns whether the interface is empty.
+ * @return True if there are no entries in the interface.
+ */
+ bool empty() const
+ {
+ return !interface_.size();
+ }
+
+ /**
+ * @brief Checks whether there are still indices waiting to be processed.
+ * @return True if there are still indices waiting to be processed.
+ */
+ std::size_t indicesLeft() const
+ {
+ return interface_.size()-index_;
+ }
+ /**
+ * @brief The number of data items per index if it is fixed, 0 otherwise.
+ */
+ std::size_t fixedSize;
+ /**
+ * @brief Get the process rank that this communication interface is with.
+ */
+ int rank() const
+ {
+ return rank_;
+ }
+ /**
+ * @brief Get the offset to the first index.
+ */
+ std::size_t offset() const
+ {
+ return index_;
+ }
+private:
+ /** @brief The process rank that this communication interface is with. */
+ int rank_;
+  /** @brief The current position in the interface's list of indices. */
+ std::size_t index_;
+ /** @brief The list of local indices of this interface. */
+ InterfaceInformation interface_;
+ std::vector<std::size_t> sizes_;
+};
+
+
+} // end unnamed namespace
+
+/**
+ * @addtogroup Common_Parallel
+ *
+ * @{
+ */
+/**
+ * @brief A buffered communicator where the amount of data sent does not have to be known a priori.
+ *
+ * In contrast to BufferedCommunicator the amount of data is determined by the container
+ * whose entries are sent and not known at the receiving side a priori.
+ *
+ * Note that there is no global index-space, only local index-spaces on each
+ * rank. Note also that each rank has two index-spaces, one used for
+ * gathering/sending, and one used for scattering/receiving. These may be
+ * identical, but they do not have to be.
+ *
+ * For data sent from rank A to rank B, the order that rank A inserts its
+ * indices into its send-interface for rank B has to be the same order that
+ * rank B inserts its matching indices into its receive interface for rank A.
+ * (This is because the `VariableSizeCommunicator` has no concept of a global
+ * index-space, so the order used to insert the indices into the interfaces is
+ * the only clue it has to know which source index should be communicated to
+ * which target index.)
+ *
+ * It is permissible for a rank to communicate with itself, i.e. it can define
+ * send- and receive-interfaces to itself. These interfaces do not need to
+ * contain the same indices, as the local send index-space can be different
+ * from the local receive index-space. This is useful for repartitioning or
+ * for aggregating in AMG.
+ *
+ * Do not assume that gathering to an index happens before scattering to the
+ * same index in the same communication, as `VariableSizeCommunicator` assumes
+ * they are from different index-spaces. This is a pitfall if you want to
+ * communicate a vector in-place, e.g. to sum up partial results from
+ * different ranks. Instead, have separate source and target vectors and copy
+ * the source vector to the target vector before communicating.
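+ *
+ * A minimal usage sketch (`otherRank` and the handle type `MyHandle` are
+ * hypothetical; the handle has to model the interface documented at forward()):
+ * \code{.cpp}
+ * // one entry per neighboring rank: a pair of (send, receive) index lists
+ * Dune::InterfaceInformation send, recv;
+ * send.add(0); send.add(1);   // local indices gathered from on this rank
+ * recv.add(0); recv.add(1);   // local indices scattered to on this rank
+ * Dune::VariableSizeCommunicator<>::InterfaceMap interfaces;
+ * interfaces[otherRank] = std::make_pair(send, recv);
+ *
+ * Dune::VariableSizeCommunicator<> communicator(MPI_COMM_WORLD, interfaces);
+ * MyHandle handle;
+ * communicator.forward(handle); // gather on the send side, scatter on the receive side
+ * \endcode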
+ */
+template<class Allocator=std::allocator<std::pair<InterfaceInformation,InterfaceInformation> > >
+class VariableSizeCommunicator
+{
+public:
+ /**
+ * @brief The type of the map from process number to InterfaceInformation for
+ * sending and receiving to and from it.
+ */
+ typedef std::map<int,std::pair<InterfaceInformation,InterfaceInformation>,
+ std::less<int>,
+ typename std::allocator_traits<Allocator>::template rebind_alloc< std::pair<const int,std::pair<InterfaceInformation,InterfaceInformation> > > > InterfaceMap;
+
+#ifndef DUNE_PARALLEL_MAX_COMMUNICATION_BUFFER_SIZE
+ /**
+ * @brief Creates a communicator with the default maximum buffer size.
+ *
+   * The default size is either the value of the macro
+   * DUNE_PARALLEL_MAX_COMMUNICATION_BUFFER_SIZE, if it is set, or 32768 otherwise.
+ */
+ VariableSizeCommunicator(MPI_Comm comm, const InterfaceMap& inf)
+ : maxBufferSize_(32768), interface_(&inf)
+ {
+ MPI_Comm_dup(comm, &communicator_);
+ }
+ /**
+ * @brief Creates a communicator with the default maximum buffer size.
+ * @param inf The communication interface.
+ */
+ VariableSizeCommunicator(const Interface& inf)
+ : maxBufferSize_(32768), interface_(&inf.interfaces())
+ {
+ MPI_Comm_dup(inf.communicator(), &communicator_);
+ }
+#else
+ /**
+ * @brief Creates a communicator with the default maximum buffer size.
+ *
+   * The default size is either the value of the macro
+   * DUNE_PARALLEL_MAX_COMMUNICATION_BUFFER_SIZE, if it is set, or 32768 otherwise.
+ */
+ VariableSizeCommunicator(MPI_Comm comm, InterfaceMap& inf)
+ : maxBufferSize_(DUNE_PARALLEL_MAX_COMMUNICATION_BUFFER_SIZE),
+ interface_(&inf)
+ {
+ MPI_Comm_dup(comm, &communicator_);
+ }
+ /**
+ * @brief Creates a communicator with the default maximum buffer size.
+ * @param inf The communication interface.
+ */
+ VariableSizeCommunicator(const Interface& inf)
+ : maxBufferSize_(DUNE_PARALLEL_MAX_COMMUNICATION_BUFFER_SIZE),
+ interface_(&inf.interfaces())
+ {
+ MPI_Comm_dup(inf.communicator(), &communicator_);
+ }
+#endif
+ /**
+ * @brief Creates a communicator with a specific maximum buffer size.
+ * @param comm The MPI communicator to use.
+ * @param inf The communication interface.
+ * @param max_buffer_size The maximum buffer size allowed.
+ */
+ VariableSizeCommunicator(MPI_Comm comm, const InterfaceMap& inf, std::size_t max_buffer_size)
+ : maxBufferSize_(max_buffer_size), interface_(&inf)
+ {
+ MPI_Comm_dup(comm, &communicator_);
+ }
+
+ /**
+ * @brief Creates a communicator with a specific maximum buffer size.
+ * @param inf The communication interface.
+ * @param max_buffer_size The maximum buffer size allowed.
+ */
+ VariableSizeCommunicator(const Interface& inf, std::size_t max_buffer_size)
+ : maxBufferSize_(max_buffer_size), interface_(&inf.interfaces())
+ {
+ MPI_Comm_dup(inf.communicator(), &communicator_);
+ }
+
+ ~VariableSizeCommunicator()
+ {
+ MPI_Comm_free(&communicator_);
+ }
+
+ /**
+ * @brief Copy-constructs a communicator
+ * @param other VariableSizeCommunicator that is copied.
+ */
+ VariableSizeCommunicator(const VariableSizeCommunicator& other) {
+ maxBufferSize_ = other.maxBufferSize_;
+ interface_ = other.interface_;
+ MPI_Comm_dup(other.communicator_, &communicator_);
+ }
+
+ /**
+ * @brief Copy-assigns a communicator
+ * @param other VariableSizeCommunicator that is copied.
+ */
+ VariableSizeCommunicator& operator=(const VariableSizeCommunicator& other) {
+ if(this == &other) // don't do anything if objects are the same
+ return *this;
+
+ maxBufferSize_ = other.maxBufferSize_;
+ interface_ = other.interface_;
+ MPI_Comm_free(&communicator_);
+ MPI_Comm_dup(other.communicator_, &communicator_);
+
+ return *this;
+ }
+
+ /**
+ * @brief Communicate forward.
+ *
+ * @tparam DataHandle The type of the handle describing the data. This type has to adhere
+ * to the following interface:
+ * \code{.cpp}
+ * // returns whether the number of data items per entry is fixed
+ * bool fixedsize();
+ * // get the number of data items for an entry with index i
+ * std::size_t size(std::size_t i);
+ * // gather the data at index i
+ * template<class MessageBuffer>
+ * void gather(MessageBuffer& buf, std::size_t i);
+ * // scatter the n data items to index i
+ * template<class MessageBuffer>
+ * void scatter(MessageBuffer& buf, std::size_t i, std::size_t n);
+ * \endcode
+ * @param handle A handle responsible for describing the data, gathering, and scattering it.
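+ *
+ * A sketch of a handle that communicates one double per index out of a
+ * std::vector (illustrative only; it assumes the message buffer offers a
+ * read() matching the write() used by the handles in this file):
+ * \code{.cpp}
+ * struct VectorDataHandle
+ * {
+ *   // the element type put into the message buffer (required by the communicator)
+ *   typedef double DataType;
+ *   std::vector<double>& data;
+ *   bool fixedsize() { return true; }             // one item per index
+ *   std::size_t size(std::size_t) { return 1; }
+ *   template<class Buffer>
+ *   void gather(Buffer& buf, std::size_t i) { buf.write(data[i]); }
+ *   template<class Buffer>
+ *   void scatter(Buffer& buf, std::size_t i, std::size_t n)
+ *   {
+ *     // n is 1 here because fixedsize() returns true and size() returns 1
+ *     for(std::size_t j=0; j<n; ++j)
+ *       buf.read(data[i]);
+ *   }
+ * };
+ * \endcode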
+ */
+ template<class DataHandle>
+ void forward(DataHandle& handle)
+ {
+ communicate<true>(handle);
+ }
+
+ /**
+ * @brief Communicate backwards.
+ *
+ * @tparam DataHandle The type of the handle describing the data. This type has to adhere
+ * to the following interface:
+ * \code{.cpp}
+ * // returns whether the number of data items per entry is fixed
+ * bool fixedsize();
+ * // get the number of data items for an entry with index i
+ * std::size_t size(std::size_t i);
+ * // gather the data at index i
+ * template<class MessageBuffer>
+ * void gather(MessageBuffer& buf, std::size_t i);
+ * // scatter the n data items to index i
+ * template<class MessageBuffer>
+ * void scatter(MessageBuffer& buf, std::size_t i, std::size_t n);
+ * \endcode
+ * @param handle A handle responsible for describing the data, gathering, and scattering it.
+ */
+ template<class DataHandle>
+ void backward(DataHandle& handle)
+ {
+ communicate<false>(handle);
+ }
+
+private:
+ template<bool FORWARD, class DataHandle>
+ void communicateSizes(DataHandle& handle,
+ std::vector<InterfaceTracker>& recv_trackers);
+
+ /**
+ * @brief Communicates data according to the interface.
+ * @tparam forward If true sends data forwards, otherwise backwards along the interface.
+ * @tparam DataHandle The type of the data handle. @see forward for a description of the interface.
+ * @param handle The handle describing the data and responsible for gather and scatter operations.
+ */
+ template<bool forward,class DataHandle>
+ void communicate(DataHandle& handle);
+ /**
+ * @brief Initialize the trackers along the interface for the communication.
+ * @tparam FORWARD If true we send in the forward direction.
+ * @tparam DataHandle The type of the data handle.
+ * @param handle The handle describing the data and responsible for gather
+ * and scatter operations.
+ * @param[out] send_trackers The trackers for the sending side.
+ * @param[out] recv_trackers The trackers for the receiving side.
+ */
+ template<bool FORWARD, class DataHandle>
+ void setupInterfaceTrackers(DataHandle& handle,
+ std::vector<InterfaceTracker>& send_trackers,
+ std::vector<InterfaceTracker>& recv_trackers);
+ /**
+ * @brief Communicate data with a fixed amount of data per entry.
+ * @tparam FORWARD If true we send in the forward direction.
+ * @tparam DataHandle The type of the data handle.
+ * @param handle The handle describing the data and responsible for gather
+ * and scatter operations.
+ */
+ template<bool FORWARD, class DataHandle>
+ void communicateFixedSize(DataHandle& handle);
+ /**
+ * @brief Communicate data with a variable amount of data per entry.
+ * @tparam FORWARD If true we send in the forward direction.
+ * @tparam DataHandle The type of the data handle.
+ * @param handle The handle describing the data and responsible for gather
+ * and scatter operations.
+ */
+ template<bool FORWARD, class DataHandle>
+ void communicateVariableSize(DataHandle& handle);
+ /**
+ * @brief The maximum size of the buffers used for gather and scatter.
+ *
+ * @note If this process has n neighbours, then a maximum of 2n buffers of this size
+ * is allocated. Memory needed will be n*sizeof(std::size_t)+n*sizeof(DataHandle::DataType)
+ */
+ std::size_t maxBufferSize_;
+ /**
+ * @brief Description of the interface.
+ *
+ * This is a map from the neighboring process number to a pair of local index lists.
+ * The first is the list of indices from which data is gathered for sending, the second
+ * is the list of indices to which received data is scattered during forward communication.
+ */
+ const InterfaceMap* interface_;
+ /**
+ * @brief The communicator.
+ *
+ * This is a cloned communicator to ensure there is no interference with other communication.
+ */
+ MPI_Comm communicator_;
+};
+
+/** @} */
+namespace
+{
+/**
+ * @brief A data handle for communicating the sizes of variable sized data.
+ */
+template<class DataHandle>
+class SizeDataHandle
+{
+public:
+ typedef std::size_t DataType;
+
+ SizeDataHandle(DataHandle& data,
+ std::vector<InterfaceTracker>& trackers)
+ : data_(data), trackers_(trackers), index_()
+ {}
+ bool fixedSize()
+ {
+ return true;
+ }
+ std::size_t size([[maybe_unused]] std::size_t i)
+ {
+ return 1;
+ }
+ template<class B>
+ void gather(B& buf, int i)
+ {
+ buf.write(data_.size(i));
+ }
+ void setReceivingIndex(std::size_t i)
+ {
+ index_=i;
+ }
+ std::size_t* getSizesPointer()
+ {
+ return trackers_[index_].getSizesPointer();
+ }
+
+private:
+ DataHandle& data_;
+ std::vector<InterfaceTracker>& trackers_;
+ int index_;
+};
+
+template<class T>
+void setReceivingIndex(T&, int)
+{}
+
+template<class T>
+void setReceivingIndex(SizeDataHandle<T>& t, int i)
+{
+ t.setReceivingIndex(i);
+}
+
+
+/**
+ * @brief Template meta program for choosing the send or receive interface
+ * information based on the direction.
+ * @tparam FORWARD If true the communication happens in the forward direction.
+ */
+template<bool FORWARD>
+struct InterfaceInformationChooser
+{
+ /**
+ * @brief Get the interface information for the sending side.
+ */
+ static const InterfaceInformation&
+ getSend(const std::pair<InterfaceInformation,InterfaceInformation>& info)
+ {
+ return info.first;
+ }
+
+ /**
+ * @brief Get the interface information for the receiving side.
+ */
+ static const InterfaceInformation&
+ getReceive(const std::pair<InterfaceInformation,InterfaceInformation>& info)
+ {
+ return info.second;
+ }
+};
+
+template<>
+struct InterfaceInformationChooser<false>
+{
+ static const InterfaceInformation&
+ getSend(const std::pair<InterfaceInformation,InterfaceInformation>& info)
+ {
+ return info.second;
+ }
+
+ static const InterfaceInformation&
+ getReceive(const std::pair<InterfaceInformation,InterfaceInformation>& info)
+ {
+ return info.first;
+ }
+};
+
+/**
+ * @brief A functor that packs entries into the message buffer.
+ * @tparam DataHandle The type of the data handle that describes
+ * the communicated data.
+ */
+template<class DataHandle>
+struct PackEntries
+{
+
+ int operator()(DataHandle& handle, InterfaceTracker& tracker,
+ MessageBuffer<typename DataHandle::DataType>& buffer,
+ [[maybe_unused]] int i) const
+ {
+ return operator()(handle,tracker,buffer);
+ }
+
+ /**
+ * @brief Packs data.
+ * @param handle The handle describing the data and the gather and scatter operations.
+ * @param tracker The tracker of the interface to tell us where we are.
+ * @param buffer The buffer to use for packing.
+ * @return The number of data entries that we packed.
+ */
+ int operator()(DataHandle& handle, InterfaceTracker& tracker,
+ MessageBuffer<typename DataHandle::DataType>& buffer) const
+ {
+ if(tracker.fixedSize) // fixed size if variable is >0!
+ {
+
+ std::size_t noIndices=std::min(buffer.size()/tracker.fixedSize, tracker.indicesLeft());
+ for(std::size_t i=0; i< noIndices; ++i)
+ {
+ handle.gather(buffer, tracker.index());
+ tracker.moveToNextIndex();
+ }
+ return noIndices*tracker.fixedSize;
+ }
+ else
+ {
+ int packed=0;
+ tracker.skipZeroIndices();
+ while(!tracker.finished())
+ if(buffer.hasSpaceForItems(handle.size(tracker.index())))
+ {
+ handle.gather(buffer, tracker.index());
+ packed+=handle.size(tracker.index());
+ tracker.moveToNextIndex();
+ }
+ else
+ break;
+ return packed;
+ }
+ }
+};
+
+/**
+ * @brief A functor that unpacks entries from the message buffer.
+ * @tparam DataHandle The type of the data handle that describes
+ * the communicated data.
+ */
+template<class DataHandle>
+struct UnpackEntries{
+
+ /**
+ * @brief Unpacks data.
+ * @param handle The handle describing the data and the gather and scatter operations.
+ * @param tracker The tracker of the interface to tell us where we are.
+ * @param buffer The buffer to unpack from.
+ * @return True if all entries of this tracker's interface have been unpacked.
+ */
+ bool operator()(DataHandle& handle, InterfaceTracker& tracker,
+ MessageBuffer<typename DataHandle::DataType>& buffer,
+ int count=0)
+ {
+ if(tracker.fixedSize) // fixed size if variable is >0!
+ {
+ std::size_t noIndices=std::min(buffer.size()/tracker.fixedSize, tracker.indicesLeft());
+
+ for(std::size_t i=0; i< noIndices; ++i)
+ {
+ handle.scatter(buffer, tracker.index(), tracker.fixedSize);
+ tracker.moveToNextIndex();
+ }
+ return tracker.finished();
+ }
+ else
+ {
+ assert(count);
+ for(int unpacked=0;unpacked<count;)
+ {
+ assert(!tracker.finished());
+ assert(buffer.hasSpaceForItems(tracker.size()));
+ handle.scatter(buffer, tracker.index(), tracker.size());
+ unpacked+=tracker.size();
+ tracker.moveToNextIndex();
+ }
+ return tracker.finished();
+ }
+ }
+};
+
+
+/**
+ * @brief A functor that unpacks std::size_t from the message buffer.
+ */
+template<class DataHandle>
+struct UnpackSizeEntries{
+
+ /**
+ * @brief Unpacks size information.
+ * @param handle The handle describing the data and the gather and scatter operations.
+ * @param tracker The tracker of the interface to tell us where we are.
+ * @param buffer The buffer to unpack the sizes from.
+ * @return The number of size entries that were unpacked.
+ */
+ bool operator()(SizeDataHandle<DataHandle>& handle, InterfaceTracker& tracker,
+ MessageBuffer<typename SizeDataHandle<DataHandle>::DataType>& buffer) const
+ {
+ std::size_t noIndices=std::min(buffer.size(), tracker.indicesLeft());
+ std::copy(static_cast<std::size_t*>(buffer), static_cast<std::size_t*>(buffer)+noIndices,
+ handle.getSizesPointer()+tracker.offset());
+ tracker.increment(noIndices);
+ return noIndices;
+ }
+ bool operator()(SizeDataHandle<DataHandle>& handle, InterfaceTracker& tracker,
+ MessageBuffer<typename SizeDataHandle<DataHandle>::DataType>& buffer, int) const
+ {
+ return operator()(handle,tracker,buffer);
+ }
+};
+
+/**
+ * @brief Exchanges the number of data items per entry (the fixed size) with all neighbouring processes.
+ * @param[in] send_trackers The trackers for the sending side.
+ * @param[out] send_requests The requests for the asynchronous send operations.
+ * @param[in] recv_trackers The trackers for the receiving side.
+ * @param[out] recv_requests The requests for the asynchronous receive operations.
+ */
+void sendFixedSize(std::vector<InterfaceTracker>& send_trackers,
+ std::vector<MPI_Request>& send_requests,
+ std::vector<InterfaceTracker>& recv_trackers,
+ std::vector<MPI_Request>& recv_requests,
+ MPI_Comm communicator)
+{
+ typedef std::vector<InterfaceTracker>::iterator TIter;
+ std::vector<MPI_Request>::iterator mIter=recv_requests.begin();
+
+ for(TIter iter=recv_trackers.begin(), end=recv_trackers.end(); iter!=end;
+ ++iter, ++mIter)
+ {
+ MPI_Irecv(&(iter->fixedSize), 1, MPITraits<std::size_t>::getType(),
+ iter->rank(), 933881, communicator, &(*mIter));
+ }
+
+ // Send our size to all neighbours using non-blocking synchronous communication.
+ std::vector<MPI_Request>::iterator mIter1=send_requests.begin();
+ for(TIter iter=send_trackers.begin(), end=send_trackers.end();
+ iter!=end;
+ ++iter, ++mIter1)
+ {
+ MPI_Issend(&(iter->fixedSize), 1, MPITraits<std::size_t>::getType(),
+ iter->rank(), 933881, communicator, &(*mIter1));
+ }
+}
+
+
+/**
+ * @brief Functor for setting up send requests.
+ * @tparam DataHandle The type of the data handle for describing the data.
+ */
+template<class DataHandle>
+struct SetupSendRequest{
+ void operator()(DataHandle& handle,
+ InterfaceTracker& tracker,
+ MessageBuffer<typename DataHandle::DataType>& buffer,
+ MPI_Request& request,
+ MPI_Comm comm) const
+ {
+ buffer.reset();
+ int size=PackEntries<DataHandle>()(handle, tracker, buffer);
+ // Skip indices of zero size.
+ while(!tracker.finished() && !handle.size(tracker.index()))
+ tracker.moveToNextIndex();
+ if(size)
+ MPI_Issend(buffer, size, MPITraits<typename DataHandle::DataType>::getType(),
+ tracker.rank(), 933399, comm, &request);
+ }
+};
+
+
+/**
+ * @brief Functor for setting up receive requests.
+ * @tparam DataHandle The type of the data handle for describing the data.
+ */
+template<class DataHandle>
+struct SetupRecvRequest{
+ void operator()(DataHandle& /*handle*/,
+ InterfaceTracker& tracker,
+ MessageBuffer<typename DataHandle::DataType>& buffer,
+ MPI_Request& request,
+ MPI_Comm comm) const
+ {
+ buffer.reset();
+ if(tracker.indicesLeft())
+ MPI_Irecv(buffer, buffer.size(), MPITraits<typename DataHandle::DataType>::getType(),
+ tracker.rank(), 933399, comm, &request);
+ }
+};
+
+/**
+ * @brief A functor that does nothing.
+ */
+template<class DataHandle>
+struct NullPackUnpackFunctor
+{
+ int operator()(DataHandle&, InterfaceTracker&,
+ MessageBuffer<typename DataHandle::DataType>&, int)
+ {
+ return 0;
+ }
+ int operator()(DataHandle&, InterfaceTracker&,
+ MessageBuffer<typename DataHandle::DataType>&)
+ {
+ return 0;
+ }
+};
+
+/**
+ * @brief Check whether some of the requests finished and continue send/receive operation.
+ * @tparam DataHandle The type of the data handle describing the data.
+ * @tparam BufferFunctor A functor that packs or unpacks data from the buffer.
+ * E.g. NullPackUnpackFunctor.
+ * @tparam CommunicationFunctor A functor responsible for continuing the communication.
+ * @param handle The data handle describing the data.
+ * @param trackers The trackers indicating the current position in the communication.
+ * @param requests The requests to test whether they finished.
+ * @param requests2 The requests to use for setting up the continuing communication. Might
+ * be the same as requests.
+ * @param comm The MPI communicator to use.
+ * @param buffer_func The functor that does the packing or unpacking of the data.
+ */
+template<class DataHandle, class BufferFunctor, class CommunicationFunctor>
+std::size_t checkAndContinue(DataHandle& handle,
+ std::vector<InterfaceTracker>& trackers,
+ std::vector<MPI_Request>& requests,
+ std::vector<MPI_Request>& requests2,
+ std::vector<MessageBuffer<typename DataHandle::DataType> >& buffers,
+ MPI_Comm comm,
+ BufferFunctor buffer_func,
+ CommunicationFunctor comm_func,
+ bool valid=true,
+ bool getCount=false)
+{
+ std::size_t size=requests.size();
+ std::vector<MPI_Status> statuses(size);
+ int no_completed;
+ std::vector<int> indices(size, -1); // the indices for which the communication finished.
+
+ MPI_Testsome(size, &(requests[0]), &no_completed, &(indices[0]), &(statuses[0]));
+ indices.resize(no_completed);
+ for(std::vector<int>::iterator index=indices.begin(), end=indices.end();
+ index!=end; ++index)
+ {
+ InterfaceTracker& tracker=trackers[*index];
+ setReceivingIndex(handle, *index);
+ if(getCount)
+ {
+ // Get the number of entries received
+ int count;
+ MPI_Get_count(&(statuses[index-indices.begin()]),
+ MPITraits<typename DataHandle::DataType>::getType(),
+ &count);
+ // Communication completed, we can reuse the buffers, e.g. unpack or repack
+ buffer_func(handle, tracker, buffers[*index], count);
+ }else
+ buffer_func(handle, tracker, buffers[*index]);
+ tracker.skipZeroIndices();
+ if(!tracker.finished()){
+ // Maybe start another communication.
+ comm_func(handle, tracker, buffers[*index], requests2[*index], comm);
+ tracker.skipZeroIndices();
+ if(valid)
+ --no_completed; // communication not finished, decrement counter for finished ones.
+ }
+ }
+ return no_completed;
+
+}
+
+/**
+ * @brief Receive the size per data entry and set up requests for receiving the data.
+ * @tparam DataHandle The type of the data handle.
+ * @param trackers The trackers indicating the indices where we receive and from which rank.
+ * @param size_requests The requests for receiving the size.
+ * @param data_requests The requests for receiving the data.
+ * @param buffers The buffers to use for receiving.
+ * @param comm The mpi communicator to use.
+ */
+template<class DataHandle>
+std::size_t receiveSizeAndSetupReceive(DataHandle& handle,
+ std::vector<InterfaceTracker>& trackers,
+ std::vector<MPI_Request>& size_requests,
+ std::vector<MPI_Request>& data_requests,
+ std::vector<MessageBuffer<typename DataHandle::DataType> >& buffers,
+ MPI_Comm comm)
+{
+ return checkAndContinue(handle, trackers, size_requests, data_requests, buffers, comm,
+ NullPackUnpackFunctor<DataHandle>(), SetupRecvRequest<DataHandle>(), false);
+}
+
+/**
+ * @brief Check whether send request completed and continue sending if necessary.
+ * @tparam DataHandle The type of the data handle.
+ * @param trackers The trackers indicating the indices from which we send and to which rank.
+ * @param requests The requests for the asynchronous communication.
+ * @param buffers The buffers to use for sending.
+ * @param comm The mpi communicator to use.
+ */
+template<class DataHandle>
+std::size_t checkSendAndContinueSending(DataHandle& handle,
+ std::vector<InterfaceTracker>& trackers,
+ std::vector<MPI_Request>& requests,
+ std::vector<MessageBuffer<typename DataHandle::DataType> >& buffers,
+ MPI_Comm comm)
+{
+ return checkAndContinue(handle, trackers, requests, requests, buffers, comm,
+ NullPackUnpackFunctor<DataHandle>(), SetupSendRequest<DataHandle>());
+}
+
+/**
+ * @brief Check whether receive request completed and continue receiving if necessary.
+ * @tparam DataHandle The type of the data handle.
+ * @param trackers The trackers indicating the indices where we receive and from which rank.
+ * @param requests The requests for the asynchronous communication.
+ * @param buffers The buffers to use for receiving.
+ * @param comm The mpi communicator to use.
+ */
+template<class DataHandle>
+std::size_t checkReceiveAndContinueReceiving(DataHandle& handle,
+ std::vector<InterfaceTracker>& trackers,
+ std::vector<MPI_Request>& requests,
+ std::vector<MessageBuffer<typename DataHandle::DataType> >& buffers,
+ MPI_Comm comm)
+{
+ return checkAndContinue(handle, trackers, requests, requests, buffers, comm,
+ UnpackEntries<DataHandle>(), SetupRecvRequest<DataHandle>(),
+ true, !Impl::callFixedSize(handle));
+}
+
+
+bool validRecvRequests(const std::vector<MPI_Request> reqs)
+{
+ for(std::vector<MPI_Request>::const_iterator i=reqs.begin(), end=reqs.end();
+ i!=end; ++i)
+ if(*i!=MPI_REQUEST_NULL)
+ return true;
+ return false;
+}
+
+/**
+ * @brief Sets up all the send requests for the data.
+ * @tparam DataHandle The type of the data handle.
+ * @tparam Functor The type of the functor to set up the request.
+ * @param handle The data handle describing the data.
+ * @param trackers The trackers for the communication interfaces.
+ * @param buffers The buffers for the communication. One for each neighbour.
+ * @param requests The send requests for each neighbour.
+ * @param setupFunctor The functor responsible for setting up the request.
+ */
+template<class DataHandle, class Functor>
+std::size_t setupRequests(DataHandle& handle,
+ std::vector<InterfaceTracker>& trackers,
+ std::vector<MessageBuffer<typename DataHandle::DataType> >& buffers,
+ std::vector<MPI_Request>& requests,
+ const Functor& setupFunctor,
+ MPI_Comm communicator)
+{
+ typedef typename std::vector<InterfaceTracker>::iterator TIter;
+ typename std::vector<MessageBuffer<typename DataHandle::DataType> >::iterator
+ biter=buffers.begin();
+ typename std::vector<MPI_Request>::iterator riter=requests.begin();
+ std::size_t complete=0;
+ for(TIter titer=trackers.begin(), end=trackers.end(); titer!=end; ++titer, ++biter, ++riter)
+ {
+ setupFunctor(handle, *titer, *biter, *riter, communicator);
+ complete+=titer->finished();
+ }
+ return complete;
+}
+} // end unnamed namespace
+
+template<class Allocator>
+template<bool FORWARD, class DataHandle>
+void VariableSizeCommunicator<Allocator>::setupInterfaceTrackers(DataHandle& handle,
+ std::vector<InterfaceTracker>& send_trackers,
+ std::vector<InterfaceTracker>& recv_trackers)
+{
+ if(interface_->size()==0)
+ return;
+ send_trackers.reserve(interface_->size());
+ recv_trackers.reserve(interface_->size());
+
+ int fixedsize=0;
+ if(Impl::callFixedSize(handle))
+ ++fixedsize;
+
+
+ typedef typename InterfaceMap::const_iterator IIter;
+ for(IIter inf=interface_->begin(), end=interface_->end(); inf!=end; ++inf)
+ {
+
+ if(Impl::callFixedSize(handle) && InterfaceInformationChooser<FORWARD>::getSend(inf->second).size())
+ fixedsize=handle.size(InterfaceInformationChooser<FORWARD>::getSend(inf->second)[0]);
+ assert(!Impl::callFixedSize(handle)||fixedsize>0);
+ send_trackers.push_back(InterfaceTracker(inf->first,
+ InterfaceInformationChooser<FORWARD>::getSend(inf->second), fixedsize));
+ recv_trackers.push_back(InterfaceTracker(inf->first,
+ InterfaceInformationChooser<FORWARD>::getReceive(inf->second), fixedsize, fixedsize==0));
+ }
+}
+
+template<class Allocator>
+template<bool FORWARD, class DataHandle>
+void VariableSizeCommunicator<Allocator>::communicateFixedSize(DataHandle& handle)
+{
+ std::vector<MPI_Request> size_send_req(interface_->size());
+ std::vector<MPI_Request> size_recv_req(interface_->size());
+
+ std::vector<InterfaceTracker> send_trackers;
+ std::vector<InterfaceTracker> recv_trackers;
+ setupInterfaceTrackers<FORWARD>(handle,send_trackers, recv_trackers);
+ sendFixedSize(send_trackers, size_send_req, recv_trackers, size_recv_req, communicator_);
+
+ std::vector<MPI_Request> data_send_req(interface_->size(), MPI_REQUEST_NULL);
+ std::vector<MPI_Request> data_recv_req(interface_->size(), MPI_REQUEST_NULL);
+ typedef typename DataHandle::DataType DataType;
+ std::vector<MessageBuffer<DataType> > send_buffers(interface_->size(), MessageBuffer<DataType>(maxBufferSize_)),
+ recv_buffers(interface_->size(), MessageBuffer<DataType>(maxBufferSize_));
+
+
+ setupRequests(handle, send_trackers, send_buffers, data_send_req,
+ SetupSendRequest<DataHandle>(), communicator_);
+
+ std::size_t no_size_to_recv, no_to_send, no_to_recv, old_size;
+ no_size_to_recv = no_to_send = no_to_recv = old_size = interface_->size();
+
+ // Skip empty interfaces.
+ typedef typename std::vector<InterfaceTracker>::const_iterator Iter;
+ for(Iter i=recv_trackers.begin(), end=recv_trackers.end(); i!=end; ++i)
+ if(i->empty())
+ --no_to_recv;
+ for(Iter i=send_trackers.begin(), end=send_trackers.end(); i!=end; ++i)
+ if(i->empty())
+ --no_to_send;
+
+ while(no_size_to_recv+no_to_send+no_to_recv)
+ {
+ // Receive the fixedsize and setup receives accordingly
+ if(no_size_to_recv)
+ no_size_to_recv -= receiveSizeAndSetupReceive(handle,recv_trackers, size_recv_req,
+ data_recv_req, recv_buffers,
+ communicator_);
+
+ // Check send completion and initiate other necessary sends
+ if(no_to_send)
+ no_to_send -= checkSendAndContinueSending(handle, send_trackers, data_send_req,
+ send_buffers, communicator_);
+ if(validRecvRequests(data_recv_req))
+ // Receive data and setup new unblocking receives if necessary
+ no_to_recv -= checkReceiveAndContinueReceiving(handle, recv_trackers, data_recv_req,
+ recv_buffers, communicator_);
+ }
+
+ // Wait for completion of sending the size.
+ //std::vector<MPI_Status> statuses(interface_->size(), MPI_STATUSES_IGNORE);
+ MPI_Waitall(size_send_req.size(), &(size_send_req[0]), MPI_STATUSES_IGNORE);
+
+}
+
+template<class Allocator>
+template<bool FORWARD, class DataHandle>
+void VariableSizeCommunicator<Allocator>::communicateSizes(DataHandle& handle,
+ std::vector<InterfaceTracker>& data_recv_trackers)
+{
+ std::vector<InterfaceTracker> send_trackers;
+ std::vector<InterfaceTracker> recv_trackers;
+ std::size_t size = interface_->size();
+ std::vector<MPI_Request> send_requests(size, MPI_REQUEST_NULL);
+ std::vector<MPI_Request> recv_requests(size, MPI_REQUEST_NULL);
+ std::vector<MessageBuffer<std::size_t> >
+ send_buffers(size, MessageBuffer<std::size_t>(maxBufferSize_)),
+ recv_buffers(size, MessageBuffer<std::size_t>(maxBufferSize_));
+ SizeDataHandle<DataHandle> size_handle(handle,data_recv_trackers);
+ setupInterfaceTrackers<FORWARD>(size_handle,send_trackers, recv_trackers);
+ setupRequests(size_handle, send_trackers, send_buffers, send_requests,
+ SetupSendRequest<SizeDataHandle<DataHandle> >(), communicator_);
+ setupRequests(size_handle, recv_trackers, recv_buffers, recv_requests,
+ SetupRecvRequest<SizeDataHandle<DataHandle> >(), communicator_);
+
+ // Count valid requests that we have to wait for.
+ auto valid_req_func =
+ [](const MPI_Request& req) { return req != MPI_REQUEST_NULL; };
+
+ auto size_to_send = std::count_if(send_requests.begin(), send_requests.end(),
+ valid_req_func);
+ auto size_to_recv = std::count_if(recv_requests.begin(), recv_requests.end(),
+ valid_req_func);
+
+ while(size_to_send+size_to_recv)
+ {
+ if(size_to_send)
+ size_to_send -=
+ checkSendAndContinueSending(size_handle, send_trackers, send_requests,
+ send_buffers, communicator_);
+ if(size_to_recv)
+ // This could have been done using checkReceiveAndContinueReceiving,
+ // but the call below is more efficient as UnpackSizeEntries
+ // uses std::copy.
+ size_to_recv -=
+ checkAndContinue(size_handle, recv_trackers, recv_requests, recv_requests,
+ recv_buffers, communicator_, UnpackSizeEntries<DataHandle>(),
+ SetupRecvRequest<SizeDataHandle<DataHandle> >());
+ }
+}
+
+template<class Allocator>
+template<bool FORWARD, class DataHandle>
+void VariableSizeCommunicator<Allocator>::communicateVariableSize(DataHandle& handle)
+{
+
+ std::vector<InterfaceTracker> send_trackers;
+ std::vector<InterfaceTracker> recv_trackers;
+ setupInterfaceTrackers<FORWARD>(handle, send_trackers, recv_trackers);
+
+ std::vector<MPI_Request> send_requests(interface_->size(), MPI_REQUEST_NULL);
+ std::vector<MPI_Request> recv_requests(interface_->size(), MPI_REQUEST_NULL);
+ typedef typename DataHandle::DataType DataType;
+ std::vector<MessageBuffer<DataType> >
+ send_buffers(interface_->size(), MessageBuffer<DataType>(maxBufferSize_)),
+ recv_buffers(interface_->size(), MessageBuffer<DataType>(maxBufferSize_));
+
+ communicateSizes<FORWARD>(handle, recv_trackers);
+ // Setup requests for sending and receiving.
+ setupRequests(handle, send_trackers, send_buffers, send_requests,
+ SetupSendRequest<DataHandle>(), communicator_);
+ setupRequests(handle, recv_trackers, recv_buffers, recv_requests,
+ SetupRecvRequest<DataHandle>(), communicator_);
+
+ // Determine number of valid requests.
+ auto valid_req_func =
+ [](const MPI_Request& req) { return req != MPI_REQUEST_NULL;};
+
+ auto no_to_send = std::count_if(send_requests.begin(), send_requests.end(),
+ valid_req_func);
+ auto no_to_recv = std::count_if(recv_requests.begin(), recv_requests.end(),
+ valid_req_func);
+ while(no_to_send+no_to_recv)
+ {
+ // Check send completion and initiate other necessary sends
+ if(no_to_send)
+ no_to_send -= checkSendAndContinueSending(handle, send_trackers, send_requests,
+ send_buffers, communicator_);
+ if(no_to_recv)
+ // Receive data and setup new unblocking receives if necessary
+ no_to_recv -= checkReceiveAndContinueReceiving(handle, recv_trackers, recv_requests,
+ recv_buffers, communicator_);
+ }
+}
+
+template<class Allocator>
+template<bool FORWARD, class DataHandle>
+void VariableSizeCommunicator<Allocator>::communicate(DataHandle& handle)
+{
+ if( interface_->size() == 0)
+ // Simply return as otherwise we will index an empty container
+ // either for MPI_Wait_all or MPI_Test_some.
+ return;
+
+ if(Impl::callFixedSize(handle))
+ communicateFixedSize<FORWARD>(handle);
+ else
+ communicateVariableSize<FORWARD>(handle);
+}
+} // end namespace Dune
+
+#endif // HAVE_MPI
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+#ifndef DUNE_COMMON_PARAMETERIZEDOBJECT_HH
+#define DUNE_COMMON_PARAMETERIZEDOBJECT_HH
+
+#include <functional>
+#include <map>
+#include <memory>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/typeutilities.hh>
+
+namespace Dune {
+
+/**
+ * @brief A factory class for parameterized objects.
+ *
+ * It allows the construction of objects adhering to a certain interface that
+ * might be constructed quite differently from one another.
+ *
+ * The Signature parameter defines the "virtual" constructor signature
+ * in the form of Interface(Args...), where Interface is the type of
+ * the (abstract) interface class and Args... is the set of
+ * constructor parameters.
+ *
+ * Each type constructed by this factory is identified by a different key. This class
+ * allows for easy registration of types with new keys.
+ *
+ * @tparam Signature Signature of the "virtual" constructor call in the form of Interface(Args...). For default constructors one can omit the ()-brackets.
+ * @tparam KeyT The type of the objects that are used as keys in the lookup [DEFAULT: std::string].
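+ *
+ * A short usage sketch (InterfaceA, ImplA and ImplB are hypothetical types; ImplA and
+ * ImplB are assumed to derive from InterfaceA and to be constructible from an int):
+ * \code{.cpp}
+ * ParameterizedObjectFactory<std::unique_ptr<InterfaceA>(int)> factory;
+ * factory.define<ImplA>("a"); // default creator, here via std::make_unique
+ * factory.define("b", [](int i) { return std::make_unique<ImplB>(2*i); });
+ * std::unique_ptr<InterfaceA> object = factory.create("a", 42);
+ * \endcode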
+ */
+template<typename Signature,
+ typename KeyT = std::string>
+class ParameterizedObjectFactory;
+
+template<typename TypeT,
+ typename KeyT,
+ typename... Args>
+class ParameterizedObjectFactory<TypeT(Args...), KeyT>
+{
+ public:
+
+ /** @brief The type of the keys. */
+ typedef KeyT Key;
+
+ /** @brief The type of objects created by the factory. */
+ using Type = TypeT;
+
+ protected:
+
+ using Creator = std::function<Type(Args...)>;
+
+ template<class F>
+ static constexpr auto has_proper_signature(Dune::PriorityTag<1>)
+ -> decltype( std::declval<F>()(std::declval<Args>()...), std::true_type())
+ {
+ return {};
+ }
+
+ template<class F>
+ static constexpr std::false_type has_proper_signature(Dune::PriorityTag<0>)
+ {
+ return {};
+ }
+
+ public:
+
+ /**
+ * @brief Creates an object identified by a key from given parameters
+ *
+ * @param key The key the object is registered with, @see define.
+ * @param args The parameters used for the construction.
+ * @return The object wrapped as Type
+ */
+ Type create(Key const& key, Args ... args) const {
+ typename Registry::const_iterator i = registry_.find(key);
+ if (i == registry_.end()) {
+ DUNE_THROW(Dune::InvalidStateException,
+ "ParametrizedObjectFactory: key ``" <<
+ key << "'' not registered");
+ }
+ else return i->second(args...);
+ }
+
+ /**
+ * @brief Registers a new type with a key.
+ *
+ * After registration objects of this type can be constructed with the
+ * specified key using a matching default creation function. If Type
+ * is a unique_ptr or shared_ptr, the object is created via make_unique
+ * or make_shared, respectively. Otherwise a constructor of Impl
+ * is called.
+ *
+ * @tparam Impl The type of objects to create.
+ *
+ * @param key The key associated with this type.
+ */
+ template<class Impl>
+ void define(Key const& key)
+ {
+ registry_[key] = DefaultCreator<Impl>();
+ }
+
+ /**
+ * @brief Registers a new creator with a key.
+ *
+ * After registration objects can be constructed using
+ * the given creator function.
+ *
+ * @tparam F Type of creator function. This must be callable with Args... .
+ *
+ * @param key The key associated with this type.
+ * @param f Function for creation of objects of type Impl
+ *
+ * \todo Replace has_proper_signature by concept check
+ */
+ template<class F,
+ typename std::enable_if<has_proper_signature<F>(PriorityTag<42>()), int>::type = 0>
+ void define(Key const& key, F&& f)
+ {
+ registry_[key] = f;
+ }
+
+ /**
+ * @brief Registers a new type with a key.
+ *
+ * After registration objects of this type can be created.
+ * This method will store a copy of the given object and
+ * create will hand out a copy of it.
+ *
+ * @tparam Impl The type of objects to create.
+ *
+ * @param key The key associated with this type.
+ * @param t reference object, "create" will call the copy-constructor
+ *
+ * @note This does not work for fundamental types.
+ */
+ template<class Impl,
+ typename std::enable_if<
+ std::is_convertible<Impl, Type>::value
+ and not std::is_convertible<Impl, Creator>::value,
+ int>::type = 0>
+ void define(Key const& key, Impl&& t)
+ {
+ registry_[key] = [=](Args...) { return t;};
+ }
+
+ bool contains(Key const& key) const
+ {
+ return registry_.count(key);
+ }
+
+ private:
+
+ template<class T>
+ struct Tag{};
+
+ template<class Impl>
+ struct DefaultCreator
+ {
+ template<class... T>
+ Type operator()(T&&... args) const
+ {
+ return DefaultCreator::create(Tag<Type>(), PriorityTag<42>(), std::forward<T>(args)...);
+ }
+
+ template<class Target, class... T>
+ static Type create(Tag<Target>, PriorityTag<1>, T&& ... args) {
+ return Impl(std::forward<T>(args)...);
+ }
+
+ template<class Target, class... T>
+ static Type create(Tag<std::unique_ptr<Target>>, PriorityTag<2>, T&& ... args) {
+ return std::make_unique<Impl>(std::forward<T>(args)...);
+ }
+
+ template<class Target, class... T>
+ static Type create(Tag<std::shared_ptr<Target>>, PriorityTag<3>, T&& ... args) {
+ return std::make_shared<Impl>(std::forward<T>(args)...);
+ }
+
+ };
+
+ typedef std::map<Key, Creator> Registry;
+ Registry registry_;
+};
+
+
+
+} // end namespace Dune
+
+#endif // DUNE_COMMON_PARAMETERIZEDOBJECT_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <cstdlib>
+#include <iostream>
+#include <ostream>
+#include <string>
+#include <sstream>
+#include <fstream>
+#include <set>
+#include <algorithm>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/parametertree.hh>
+
+using namespace Dune;
+
+ParameterTree::ParameterTree()
+{}
+
+const Dune::ParameterTree Dune::ParameterTree::empty_;
+
+void ParameterTree::report(std::ostream& stream, const std::string& prefix) const
+{
+ typedef std::map<std::string, std::string>::const_iterator ValueIt;
+ ValueIt vit = values_.begin();
+ ValueIt vend = values_.end();
+
+ for(; vit!=vend; ++vit)
+ stream << vit->first << " = \"" << vit->second << "\"" << std::endl;
+
+ typedef std::map<std::string, ParameterTree>::const_iterator SubIt;
+ SubIt sit = subs_.begin();
+ SubIt send = subs_.end();
+ for(; sit!=send; ++sit)
+ {
+ stream << "[ " << prefix + prefix_ + sit->first << " ]" << std::endl;
+ (sit->second).report(stream, prefix);
+ }
+}
+
+bool ParameterTree::hasKey(const std::string& key) const
+{
+ std::string::size_type dot = key.find(".");
+
+ if (dot != std::string::npos)
+ {
+ std::string prefix = key.substr(0,dot);
+ if (subs_.count(prefix) == 0)
+ return false;
+
+ if (values_.count(prefix) > 0)
+ DUNE_THROW(RangeError,"key " << prefix << " occurs as value and as subtree");
+
+ const ParameterTree& s = sub(prefix);
+ return s.hasKey(key.substr(dot+1));
+ }
+ else
+ if (values_.count(key) != 0)
+ {
+ if (subs_.count(key) > 0)
+ DUNE_THROW(RangeError,"key " << key << " occurs as value and as subtree");
+ return true;
+ }
+ else
+ return false;
+
+}
+
+bool ParameterTree::hasSub(const std::string& key) const
+{
+ std::string::size_type dot = key.find(".");
+
+ if (dot != std::string::npos)
+ {
+ std::string prefix = key.substr(0,dot);
+ if (subs_.count(prefix) == 0)
+ return false;
+
+ if (values_.count(prefix) > 0)
+ DUNE_THROW(RangeError,"key " << prefix << " occurs as value and as subtree");
+
+ const ParameterTree& s = sub(prefix);
+ return s.hasSub(key.substr(dot+1));
+ }
+ else
+ if (subs_.count(key) != 0)
+ {
+ if (values_.count(key) > 0)
+ DUNE_THROW(RangeError,"key " << key << " occurs as value and as subtree");
+ return true;
+ }
+ else
+ return false;
+}
+
+ParameterTree& ParameterTree::sub(const std::string& key)
+{
+ std::string::size_type dot = key.find(".");
+
+ if (dot != std::string::npos)
+ {
+ ParameterTree& s = sub(key.substr(0,dot));
+ return s.sub(key.substr(dot+1));
+ }
+ else
+ {
+ if (values_.count(key) > 0)
+ DUNE_THROW(RangeError,"key " << key << " occurs as value and as subtree");
+ if (subs_.count(key) == 0)
+ subKeys_.push_back(key.substr(0,dot));
+ subs_[key].prefix_ = prefix_ + key + ".";
+ return subs_[key];
+ }
+}
+
+const ParameterTree& ParameterTree::sub(const std::string& key, bool fail_if_missing) const
+{
+ std::string::size_type dot = key.find(".");
+
+ if (dot != std::string::npos)
+ {
+ const ParameterTree& s = sub(key.substr(0,dot));
+ return s.sub(key.substr(dot+1),fail_if_missing);
+ }
+ else
+ {
+ if (values_.count(key) > 0)
+ DUNE_THROW(RangeError,"key " << key << " occurs as value and as subtree");
+ if (subs_.count(key) == 0)
+ {
+ if (fail_if_missing)
+ {
+ DUNE_THROW(Dune::RangeError, "SubTree '" << key
+ << "' not found in ParameterTree (prefix " + prefix_ + ")");
+ }
+ else
+ return empty_;
+ }
+ return subs_.find(key)->second;
+ }
+}
+
+std::string& ParameterTree::operator[] (const std::string& key)
+{
+ std::string::size_type dot = key.find(".");
+
+ if (dot != std::string::npos)
+ {
+ ParameterTree& s = sub(key.substr(0,dot));
+ return s[key.substr(dot+1)];
+ }
+ else
+ {
+ if (! hasKey(key))
+ valueKeys_.push_back(key);
+ return values_[key];
+ }
+}
+
+const std::string& ParameterTree::operator[] (const std::string& key) const
+{
+ std::string::size_type dot = key.find(".");
+
+ if (dot != std::string::npos)
+ {
+ const ParameterTree& s = sub(key.substr(0,dot));
+ return s[key.substr(dot+1)];
+ }
+ else
+ {
+ if (! hasKey(key))
+ DUNE_THROW(Dune::RangeError, "Key '" << key
+ << "' not found in ParameterTree (prefix " + prefix_ + ")");
+ return values_.find(key)->second;
+ }
+}
+
+std::string ParameterTree::get(const std::string& key, const std::string& defaultValue) const
+{
+ if (hasKey(key))
+ return (*this)[key];
+ else
+ return defaultValue;
+}
+
+std::string ParameterTree::get(const std::string& key, const char* defaultValue) const
+{
+ if (hasKey(key))
+ return (*this)[key];
+ else
+ return defaultValue;
+}
+
+std::string ParameterTree::ltrim(const std::string& s)
+{
+ std::size_t firstNonWS = s.find_first_not_of(" \t\n\r");
+
+ if (firstNonWS!=std::string::npos)
+ return s.substr(firstNonWS);
+ return std::string();
+}
+
+std::string ParameterTree::rtrim(const std::string& s)
+{
+ std::size_t lastNonWS = s.find_last_not_of(" \t\n\r");
+
+ if (lastNonWS!=std::string::npos)
+ return s.substr(0, lastNonWS+1);
+ return std::string();
+}
+
+std::vector<std::string> ParameterTree::split(const std::string & s) {
+ std::vector<std::string> substrings;
+ std::size_t front = 0, back = 0, size = 0;
+
+ while (front != std::string::npos)
+ {
+ // find beginning of substring
+ front = s.find_first_not_of(" \t\n\r", back);
+ back = s.find_first_of(" \t\n\r", front);
+ size = back - front;
+ if (size > 0)
+ substrings.push_back(s.substr(front, size));
+ }
+ return substrings;
+}
+
+const ParameterTree::KeyVector& ParameterTree::getValueKeys() const
+{
+ return valueKeys_;
+}
+
+const ParameterTree::KeyVector& ParameterTree::getSubKeys() const
+{
+ return subKeys_;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_PARAMETERTREE_HH
+#define DUNE_PARAMETERTREE_HH
+
+/** \file
+ * \brief A hierarchical structure of string parameters
+ */
+
+#include <array>
+#include <cstddef>
+#include <iostream>
+#include <istream>
+#include <iterator>
+#include <locale>
+#include <map>
+#include <ostream>
+#include <sstream>
+#include <string>
+#include <typeinfo>
+#include <vector>
+#include <algorithm>
+#include <bitset>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/classname.hh>
+
+namespace Dune {
+
+ /** \brief Hierarchical structure of string parameters
+ * \ingroup Common
+ */
+ class ParameterTree
+ {
+ // class providing a single static parse() function, used by the
+ // generic get() method
+ template<typename T>
+ struct Parser;
+
+ public:
+
+ /** \brief storage for key lists
+ */
+ typedef std::vector<std::string> KeyVector;
+
+ /** \brief Create new empty ParameterTree
+ */
+ ParameterTree();
+
+
+ /** \brief test for key
+ *
+ * Tests whether given key exists.
+ *
+ * \param key key name
+ * \return true if key exists in structure, otherwise false
+ */
+ bool hasKey(const std::string& key) const;
+
+
+ /** \brief test for substructure
+ *
+ * Tests whether given substructure exists.
+ *
+ * \param sub substructure name
+ * \return true if substructure exists in structure, otherwise false
+ */
+ bool hasSub(const std::string& sub) const;
+
+
+ /** \brief get value reference for key
+ *
+ * Returns reference to value for given key name.
+ * This creates the key, if not existent.
+ *
+ * \param key key name
+ * \return reference to corresponding value
+ */
+ std::string& operator[] (const std::string& key);
+
+
+ /** \brief get value reference for key
+ *
+ * Returns a const reference to the value for the given key name.
+ * Throws an exception if the key does not exist.
+ *
+ * \param key key name
+ * \return reference to corresponding value
+ * \throw Dune::RangeError if key is not found
+ */
+ const std::string& operator[] (const std::string& key) const;
+
+
+ /** \brief print distinct substructure to stream
+ *
+ * Prints all entries with given prefix.
+ *
+ * \param stream Stream to print to
+ * \param prefix for key and substructure names
+ */
+ void report(std::ostream& stream = std::cout,
+ const std::string& prefix = "") const;
+
+
+ /** \brief get substructure by name
+ *
+ * \param sub substructure name
+ * \return reference to substructure
+ */
+ ParameterTree& sub(const std::string& sub);
+
+
+ /** \brief get const substructure by name
+ *
+ * \param sub substructure name
+ * \param fail_if_missing if true, throw an error if substructure is missing
+ * \return reference to substructure
+ */
+ const ParameterTree& sub(const std::string& sub, bool fail_if_missing = false) const;
+
+
+ /** \brief get value as string
+ *
+ * Returns pure string value for given key.
+ *
+ * \param key key name
+ * \param defaultValue default if key does not exist
+ * \return value as string
+ */
+ std::string get(const std::string& key, const std::string& defaultValue) const;
+
+ /** \brief get value as string
+ *
+ * Returns pure string value for given key.
+ *
+ * \todo This is a hack so get("my_key", "xyz") compiles
+ * (without this method "xyz" resolves to bool instead of std::string)
+ * \param key key name
+ * \param defaultValue default if key does not exist
+ * \return value as string
+ */
+ std::string get(const std::string& key, const char* defaultValue) const;
+
+
+ /** \brief get value converted to a certain type
+ *
+ * Returns value as type T for given key.
+ *
+ * \tparam T type of returned value.
+ * \param key key name
+ * \param defaultValue default if key does not exist
+ * \return value converted to T
+ */
+ template<typename T>
+ T get(const std::string& key, const T& defaultValue) const {
+ if(hasKey(key))
+ return get<T>(key);
+ else
+ return defaultValue;
+ }
+
+ /** \brief Get value
+ *
+ * \tparam T Type of the value
+ * \param key Key name
+ * \throws RangeError if key does not exist
+ * \throws NotImplemented Type is not supported
+ * \return value as T
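+ *
+ * A short sketch (keys and values are illustrative):
+ * \code{.cpp}
+ * Dune::ParameterTree pt;
+ * pt["grid.cells"]  = "8 8";
+ * pt["grid.extent"] = "1.0 2.0";
+ * auto cells  = pt.get<std::array<int,2>>("grid.cells");
+ * auto extent = pt.get<Dune::FieldVector<double,2>>("grid.extent");
+ * \endcode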
+ */
+ template <class T>
+ T get(const std::string& key) const {
+ if(not hasKey(key))
+ DUNE_THROW(Dune::RangeError, "Key '" << key
+ << "' not found in ParameterTree (prefix " + prefix_ + ")");
+ try {
+ return Parser<T>::parse((*this)[key]);
+ }
+ catch(const RangeError& e) {
+ // rethrow the error and add more information
+ DUNE_THROW(RangeError, "Cannot parse value \"" << (*this)[key]
+ << "\" for key \"" << prefix_ << "." << key << "\""
+ << e.what());
+ }
+ }
+
+ /** \brief get value keys
+ *
+ * Returns a vector of all keys associated to (key,values) entries in
+ * order of appearance
+ *
+ * \return reference to entry vector
+ */
+ const KeyVector& getValueKeys() const;
+
+
+ /** \brief get substructure keys
+ *
+ * Returns a vector of all keys associated to (key,substructure) entries
+ * in order of appearance
+ *
+ * \return reference to entry vector
+ */
+ const KeyVector& getSubKeys() const;
+
+ protected:
+
+ static const ParameterTree empty_;
+
+ std::string prefix_;
+
+ KeyVector valueKeys_;
+ KeyVector subKeys_;
+
+ std::map<std::string, std::string> values_;
+ std::map<std::string, ParameterTree> subs_;
+
+ static std::string ltrim(const std::string& s);
+ static std::string rtrim(const std::string& s);
+ static std::vector<std::string> split(const std::string & s);
+
+ // parse into a fixed-size range of iterators
+ template<class Iterator>
+ static void parseRange(const std::string &str,
+ Iterator it, const Iterator &end)
+ {
+ typedef typename std::iterator_traits<Iterator>::value_type Value;
+ std::istringstream s(str);
+ // make sure we are in locale "C"
+ s.imbue(std::locale::classic());
+ std::size_t n = 0;
+ for(; it != end; ++it, ++n) {
+ s >> *it;
+ if(!s)
+ DUNE_THROW(RangeError, "as a range of items of type "
+ << className<Value>()
+ << " (" << n << " items were extracted successfully)");
+ }
+ Value dummy;
+ s >> dummy;
+ // now extraction should have failed, and eof should be set
+ if(not s.fail() or not s.eof())
+ DUNE_THROW(RangeError, "as a range of "
+ << n << " items of type "
+ << className<Value>() << " (more items than the range can hold)");
+ }
+ };
+
+ template<typename T>
+ struct ParameterTree::Parser {
+ static T parse(const std::string& str) {
+ T val;
+ std::istringstream s(str);
+ // make sure we are in locale "C"
+ s.imbue(std::locale::classic());
+ s >> val;
+ if(!s)
+ DUNE_THROW(RangeError, " as a " << className<T>());
+ char dummy;
+ s >> dummy;
+ // now extraction should have failed, and eof should be set
+ if ((! s.fail()) || (! s.eof()))
+ DUNE_THROW(RangeError, " as a " << className<T>());
+ return val;
+ }
+ };
+
+ // "How do I convert a string into a wstring in C++?" "Why, that very simple
+ // son. You just need a these hundred lines of code."
+ // Instead im gonna restrict myself to string with charT=char here
+ template<typename traits, typename Allocator>
+ struct ParameterTree::Parser<std::basic_string<char, traits, Allocator> > {
+ static std::basic_string<char, traits, Allocator>
+ parse(const std::string& str) {
+ std::string trimmed = ltrim(rtrim(str));
+ return std::basic_string<char, traits, Allocator>(trimmed.begin(),
+ trimmed.end());
+ }
+ };
+
+ template<>
+ struct ParameterTree::Parser< bool > {
+ struct ToLower {
+ char operator()(char c)
+ {
+ return std::tolower(c, std::locale::classic());
+ }
+ };
+
+ static bool
+ parse(const std::string& str) {
+ std::string ret = str;
+
+ std::transform(ret.begin(), ret.end(), ret.begin(), ToLower());
+
+ if (ret == "yes" || ret == "true")
+ return true;
+
+ if (ret == "no" || ret == "false")
+ return false;
+
+ return (Parser<int>::parse(ret) != 0);
+ }
+ };
+
+ template<typename T, int n>
+ struct ParameterTree::Parser<FieldVector<T, n> > {
+ static FieldVector<T, n>
+ parse(const std::string& str) {
+ FieldVector<T, n> val;
+ parseRange(str, val.begin(), val.end());
+ return val;
+ }
+ };
+
+ template<typename T, std::size_t n>
+ struct ParameterTree::Parser<std::array<T, n> > {
+ static std::array<T, n>
+ parse(const std::string& str) {
+ std::array<T, n> val;
+ parseRange(str, val.begin(), val.end());
+ return val;
+ }
+ };
+
+ template<std::size_t n>
+ struct ParameterTree::Parser<std::bitset<n> > {
+ static std::bitset<n>
+ parse(const std::string& str) {
+ std::bitset<n> val;
+ std::vector<std::string> sub = split(str);
+ if (sub.size() != n)
+ DUNE_THROW(RangeError, "as a bitset<" << n << "> "
+ << "because of unmatching size " << sub.size());
+ for (std::size_t i=0; i<n; ++i) {
+ val[i] = ParameterTree::Parser<bool>::parse(sub[i]);
+ }
+ return val;
+ }
+ };
+
+ template<typename T, typename A>
+ struct ParameterTree::Parser<std::vector<T, A> > {
+ static std::vector<T, A>
+ parse(const std::string& str) {
+ std::vector<std::string> sub = split(str);
+ std::vector<T, A> vec;
+ for (unsigned int i=0; i<sub.size(); ++i) {
+ T val = ParameterTree::Parser<T>::parse(sub[i]);
+ vec.push_back(val);
+ }
+ return vec;
+ }
+ };
+
+} // end namespace Dune
+
+#endif // DUNE_PARAMETERTREE_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "parametertreeparser.hh"
+
+#include <cstdlib>
+#include <iostream>
+#include <ostream>
+#include <string>
+#include <sstream>
+#include <fstream>
+#include <set>
+#include <map>
+#include <algorithm>
+
+#include <dune/common/exceptions.hh>
+
+std::string Dune::ParameterTreeParser::ltrim(const std::string& s)
+{
+ std::size_t firstNonWS = s.find_first_not_of(" \t\n\r");
+
+ if (firstNonWS!=std::string::npos)
+ return s.substr(firstNonWS);
+ return std::string();
+}
+
+std::string Dune::ParameterTreeParser::rtrim(const std::string& s)
+{
+ std::size_t lastNonWS = s.find_last_not_of(" \t\n\r");
+
+ if (lastNonWS!=std::string::npos)
+ return s.substr(0, lastNonWS+1);
+ return std::string();
+}
+
+Dune::ParameterTree Dune::ParameterTreeParser::readINITree(const std::string& file)
+{
+ std::ifstream in(file);
+
+ if (!in)
+ DUNE_THROW(Dune::IOError, "Could not open configuration file " << file);
+
+ Dune::ParameterTree pt;
+ readINITree(in, pt, "file '" + file + "'", true);
+ return pt;
+}
+
+Dune::ParameterTree Dune::ParameterTreeParser::readINITree(std::istream& in)
+{
+ Dune::ParameterTree pt;
+ readINITree(in, pt, "stream", true);
+ return pt;
+}
+
+void Dune::ParameterTreeParser::readINITree(std::string file,
+ ParameterTree& pt,
+ bool overwrite)
+{
+ std::ifstream in(file.c_str());
+
+ if (!in)
+ DUNE_THROW(Dune::IOError, "Could not open configuration file " << file);
+
+ readINITree(in, pt, "file '" + file + "'", overwrite);
+}
+
+
+void Dune::ParameterTreeParser::readINITree(std::istream& in,
+ ParameterTree& pt,
+ bool overwrite)
+{
+ readINITree(in, pt, "stream", overwrite);
+}
+
+
+void Dune::ParameterTreeParser::readINITree(std::istream& in,
+ ParameterTree& pt,
+ const std::string srcname,
+ bool overwrite)
+{
+ std::string prefix;
+ std::set<std::string> keysInFile;
+ while(!in.eof())
+ {
+ std::string line;
+ getline(in, line);
+ line = ltrim(line);
+ if (line.size() == 0)
+ continue;
+ switch (line[0]) {
+ case '#' :
+ break;
+ case '[' :
+ {
+ size_t pos = line.find(']');
+ if (pos != std::string::npos) {
+ prefix = rtrim(ltrim(line.substr(1, pos-1)));
+ if (prefix != "")
+ prefix += ".";
+ }
+ }
+ break;
+ default :
+ std::string::size_type comment = line.find("#");
+ line = line.substr(0,comment);
+ std::string::size_type mid = line.find("=");
+ if (mid != std::string::npos)
+ {
+ std::string key = prefix+rtrim(ltrim(line.substr(0, mid)));
+ std::string value = ltrim(line.substr(mid+1));
+
+ if (value.length()>0)
+ {
+ // handle quoted strings
+ if ((value[0]=='\'') || (value[0]=='"'))
+ {
+ char quote = value[0];
+ value=value.substr(1);
+ while (*(rtrim(value).rbegin())!=quote)
+ {
+ if (! in.eof())
+ {
+ std::string l;
+ getline(in, l);
+ value = value+"\n"+l;
+ }
+ else
+ value = value+quote;
+ }
+ value = rtrim(value);
+ value = value.substr(0,value.length()-1);
+ }
+ else
+ value = rtrim(value);
+ }
+
+ if (keysInFile.count(key) != 0)
+ DUNE_THROW(ParameterTreeParserError, "Key '" << key <<
+ "' appears twice in " << srcname << " !");
+ else
+ {
+ if(overwrite || ! pt.hasKey(key))
+ pt[key] = value;
+ keysInFile.insert(key);
+ }
+ }
+ break;
+ }
+ }
+
+}
+
+void Dune::ParameterTreeParser::readOptions(int argc, char* argv [],
+ ParameterTree& pt)
+{
+ for(int i=1; i<argc; i++)
+ {
+ if ((argv[i][0]=='-') && (argv[i][1]!='\000'))
+ {
+ if(argv[i+1] == NULL)
+ DUNE_THROW(RangeError, "last option on command line (" << argv[i]
+ << ") does not have an argument");
+ pt[argv[i]+1] = argv[i+1];
+ ++i; // skip over option argument
+ }
+ }
+}
+
+void Dune::ParameterTreeParser::readNamedOptions(int argc, char* argv[],
+ ParameterTree& pt,
+ std::vector<std::string> keywords,
+ unsigned int required,
+ bool allow_more,
+ bool overwrite,
+ std::vector<std::string> help)
+{
+ std::string helpstr = generateHelpString(argv[0], keywords, required, help);
+ std::vector<bool> done(keywords.size(),false);
+ std::size_t current = 0;
+
+ for (std::size_t i=1; i<std::size_t(argc); i++)
+ {
+ std::string opt = argv[i];
+ // check for help
+ if (opt == "-h" || opt == "--help")
+ DUNE_THROW(HelpRequest, helpstr);
+ // is this a named parameter?
+ if (opt.substr(0,2) == "--")
+ {
+ size_t pos = opt.find('=',2);
+ if (pos == std::string::npos)
+ DUNE_THROW(ParameterTreeParserError,
+ "value missing for parameter " << opt << "\n" << helpstr);
+ std::string key = opt.substr(2,pos-2);
+ std::string value = opt.substr(pos+1,opt.size()-pos-1);
+ auto it = std::find(keywords.begin(), keywords.end(), key);
+ // is this param in the keywords?
+ if (!allow_more && it == keywords.end())
+ DUNE_THROW(ParameterTreeParserError,
+ "unknown parameter " << key << "\n" << helpstr);
+ // do we overwrite an existing entry?
+ if (!overwrite && pt[key] != "")
+ DUNE_THROW(ParameterTreeParserError,
+ "parameter " << key << " already specified" << "\n" << helpstr);
+ pt[key] = value;
+ if(it != keywords.end())
+ done[std::distance(keywords.begin(),it)] = true; // mark key as stored
+ }
+ else {
+ // map to the next keyword in the list
+ while(current < done.size() && done[current]) ++current;
+ // are there keywords left?
+ if (current >= done.size())
+ DUNE_THROW(ParameterTreeParserError,
+ "superfluous unnamed parameter" << "\n" << helpstr);
+ // do we overwrite an existing entry?
+ if (!overwrite && pt[keywords[current]] != "")
+ DUNE_THROW(ParameterTreeParserError,
+ "parameter " << keywords[current] << " already specified" << "\n" << helpstr);
+ pt[keywords[current]] = opt;
+ done[current] = true; // mark key as stored
+ }
+ }
+ // check that we received all required keywords
+ std::string missing = "";
+ for (unsigned int i=0; i<keywords.size(); i++)
+ if ((i < required) && ! done[i]) // is this param required?
+ missing += std::string(" ") + keywords[i];
+ if (missing.size())
+ DUNE_THROW(ParameterTreeParserError,
+ "missing parameter(s) ... " << missing << "\n" << helpstr);
+}
+
+std::string Dune::ParameterTreeParser::generateHelpString(
+ std::string progname, std::vector<std::string> keywords, unsigned int required, std::vector<std::string> help)
+{
+ static const char braces[] = "<>[]";
+ std::string helpstr = "";
+ helpstr = helpstr + "Usage: " + progname;
+ for (std::size_t i=0; i<keywords.size(); i++)
+ {
+ bool req = (i < required);
+ helpstr = helpstr +
+ " " + braces[req*2] +
+ keywords[i] +braces[req*2+1];
+ }
+ helpstr = helpstr + "\n"
+ "Options:\n"
+ "-h / --help: this help\n";
+ for (std::size_t i=0; i<std::min(keywords.size(),help.size()); i++)
+ {
+ if (help[i] != "")
+ helpstr = helpstr + "-" +
+ keywords[i] + ":\t" + help[i] + "\n";
+ }
+ return helpstr;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_PARAMETER_PARSER_HH
+#define DUNE_PARAMETER_PARSER_HH
+
+/** \file
+ * \brief Various parser methods to get data into a ParameterTree object
+ */
+
+#include <istream>
+#include <limits>
+#include <string>
+#include <vector>
+
+#include <dune/common/parametertree.hh>
+#include <dune/common/exceptions.hh>
+
+namespace Dune {
+
+ /** \brief report parser error while reading ParameterTree */
+ class ParameterTreeParserError : public RangeError {};
+ /** \brief exception thrown if the user wants to see help string
+
+ this exception is only thrown if the command line parameters
+ contain an option --help or -h
+ */
+ class HelpRequest : public Exception {};
+
+ /** \brief Parsers to set up a ParameterTree from various input sources
+ * \ingroup Common
+ *
+ */
+ class ParameterTreeParser
+ {
+
+ static std::string ltrim(const std::string& s);
+ static std::string rtrim(const std::string& s);
+
+ public:
+
+ /** @name Parsing methods for the INITree file format
+ *
+ * INITree files should look like this
+ * \verbatim
+ * # this file configures fruit colors in fruitsalad
+ *
+ *
+ * # these are not fruit but could also appear in fruit salad
+ * honeydewmelon = yellow
+ * watermelon = green
+ *
+ * fruit.tropicalfruit.orange = orange
+ *
+ * [fruit]
+ * strawberry = red
+ * pomegranate = red
+ *
+ * [fruit.pipfruit]
+ * apple = green/red/yellow
+ * pear = green
+ *
+ * [fruit.stonefruit]
+ * cherry = red
+ * plum = purple
+ *
+ * \endverbatim
+ *
+ *
+ * If a '[prefix]' statement appears, all following entries use this prefix
+ * until the next '[prefix]' statement. The fruit salad example above thus
+ * contains:
+ * \verbatim
+ * honeydewmelon = yellow
+ * fruit.tropicalfruit.orange = orange
+ * fruit.pipfruit.apple = green/red/yellow
+ * fruit.stonefruit.cherry = red
+ * \endverbatim
+ *
+ * All keys with a common 'prefix.' belong to the same substructure called
+ * 'prefix'. Leading and trailing spaces and tabs are removed from the
+ * values unless you use single or double quotes around them. Using single
+ * or double quotes you can also have multiline values.
+ */
+ //@{
+
+ /** \brief parse C++ stream
+ *
+ * Parses a C++ stream and builds a hierarchical config structure.
+ *
+ * \param in The stream to parse
+ * \param[out] pt The parameter tree to store the config structure.
+ * \param overwrite Whether to overwrite already existing values.
+ * If false, values in the stream will be ignored
+ * if the key is already present.
+ */
+ static void readINITree(std::istream& in, ParameterTree& pt,
+ bool overwrite);
+
+ /** \brief parse C++ stream
+ *
+ * Parses a C++ stream and returns the hierarchical config structure.
+ *
+ * \param in The stream to parse
+ */
+ static Dune::ParameterTree readINITree(std::istream& in);
+
+
+ /** \brief parse C++ stream
+ *
+ * Parses a C++ stream and builds a hierarchical config structure.
+ *
+ * \param in The stream to parse
+ * \param[out] pt The parameter tree to store the config structure.
+ * \param srcname Name of the configuration source for error
+ * messages, "stdin" or a filename.
+ * \param overwrite Whether to overwrite already existing values.
+ * If false, values in the stream will be ignored
+ * if the key is already present.
+ */
+ static void readINITree(std::istream& in, ParameterTree& pt,
+ const std::string srcname = "stream",
+ bool overwrite = true);
+
+
+ /** \brief parse file
+ *
+ * Parses the file with the given name and builds a hierarchical config structure.
+ *
+ * \param file filename
+ * \param[out] pt The parameter tree to store the config structure.
+ * \param overwrite Whether to overwrite already existing values.
+ * If false, values in the stream will be ignored
+ * if the key is already present.
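+ *
+ * A minimal usage sketch; the file name and key are illustrative and
+ * assume an INITree file like the fruit salad example above:
+ * \code
+ * Dune::ParameterTree pt;
+ * Dune::ParameterTreeParser::readINITree("fruitsalad.ini", pt);
+ * std::string color = pt["fruit.stonefruit.cherry"]; // "red"
+ * \endcode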
+ */
+ static void readINITree(std::string file, ParameterTree& pt, bool overwrite = true);
+
+ /** \brief parse file and return tree
+ *
+ * Parses the file with the given name and returns the hierarchical config structure.
+ *
+ * \param file filename
+ */
+ static Dune::ParameterTree readINITree(const std::string& file);
+
+ //@}
+
+ /** \brief parse command line options and build hierarchical ParameterTree structure
+ *
+ * The list of command line options is searched for pairs of the type <kbd>-key value</kbd>
+ * (note the hyphen in front of the key).
+ * For each such pair of options a key-value pair with the corresponding names
+ * is then created in the ParameterTree.
+ *
+ * \param argc arg count
+ * \param argv arg values
+ * \param[out] pt The parameter tree to store the config structure.
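+ *
+ * A minimal usage sketch (the option names are purely illustrative):
+ * \code
+ * // invoked e.g. as ./app -level 4 -solver.tol 1e-8
+ * Dune::ParameterTree pt;
+ * Dune::ParameterTreeParser::readOptions(argc, argv, pt);
+ * // now pt["level"] == "4" and pt["solver.tol"] == "1e-8"
+ * \endcode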
+ */
+ static void readOptions(int argc, char* argv [], ParameterTree& pt);
+
+ /**
+ * \brief read [named] command line options and build hierarchical ParameterTree structure
+ *
+ * Similar to Python's named arguments, we expect the parameters in the
+ * ordering induced by keywords, but allow the user to pass named options
+ * in the form --key=value. Optionally, the user can pass an additional
+ * vector with help strings.
+ *
+ * \param argc arg count
+ * \param argv arg values
+ * \param[out] pt The parameter tree to store the config structure.
+ * \param keywords vector with keyword names
+ * \param required number of required options (the first n keywords are required; by default all are required)
+ * \param allow_more allow more options than those listed in keywords (default = true)
+ * \param overwrite allow overwriting existing options (default = true)
+ * \param help vector containing help strings
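+ *
+ * A short sketch of the intended call pattern (keyword names are
+ * illustrative only):
+ * \code
+ * // invoked e.g. as ./app input.msh 0.5 or ./app --dt=0.5 --grid=input.msh
+ * Dune::ParameterTree pt;
+ * Dune::ParameterTreeParser::readNamedOptions(argc, argv, pt, {"grid", "dt"});
+ * // in both cases pt["grid"] == "input.msh" and pt["dt"] == "0.5"
+ * \endcode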
+ */
+ static void readNamedOptions(int argc, char* argv[],
+ ParameterTree& pt,
+ std::vector<std::string> keywords,
+ unsigned int required = std::numeric_limits<unsigned int>::max(),
+ bool allow_more = true,
+ bool overwrite = true,
+ std::vector<std::string> help = std::vector<std::string>());
+
+ private:
+ static std::string generateHelpString(std::string progname, std::vector<std::string> keywords, unsigned int required, std::vector<std::string> help);
+ };
+
+} // end namespace Dune
+
+#endif // DUNE_PARAMETER_PARSER_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <algorithm>
+#include <iterator>
+#include <string>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/stringutility.hh>
+#include <dune/common/path.hh>
+
+namespace Dune {
+ /**
+ * @addtogroup Path Filesystem Paths
+ * @ingroup Common
+ * @{
+ */
+
+ /**
+ * @file
+ * @author Jö Fahlke <jorrit@jorrit.de>
+ * @brief Utilities for handling filesystem paths
+ */
+
+ //! concatenate two paths
+ std::string concatPaths(const std::string& base, const std::string& p) {
+ if(p == "") return base;
+ if(p[0] == '/') return p;
+ if(base == "") return p;
+ if(hasSuffix(base, "/")) return base+p;
+ else return base+'/'+p;
+ }
+
+ //! sanitize a path for further processing
+ std::string processPath(const std::string& p) {
+ std::string result = p;
+ std::string::size_type src, dst;
+
+ // append a '/' to non-empty paths
+ if(result != "") result += '/';
+
+ // each path component now has a trailing '/'
+
+ // collapse any occurrence of multiple '/' to a single '/'
+ dst = src = 0;
+ while(src < result.size()) {
+ result[dst] = result[src];
+ ++src;
+ if(result[dst] == '/')
+ while(src < result.size() && result[src] == '/')
+ ++src;
+ ++dst;
+ }
+ result.resize(dst);
+
+ // the path is now free of multiple '/' in a row
+
+ // collapse any occurrence of "/./" to "/"
+ dst = src = 0;
+ while(src < result.size()) {
+ result[dst] = result[src];
+ ++src;
+ if(result[dst] == '/')
+ while(src+1 < result.size() && result[src] == '.' &&
+ result[src+1] == '/')
+ src+=2;
+ ++dst;
+ }
+ result.resize(dst);
+
+ // there may be at most one leading "./". If so, remove it
+ if(hasPrefix(result, "./")) result.erase(0, 2);
+
+ // the path is now free of "."-components
+
+ // remove any "<component>/../" pairs
+ src = 0;
+ while(true) {
+ src = result.find("/../", src);
+ if(src == std::string::npos)
+ break;
+ for(dst = src; dst > 0 && result[dst-1] != '/'; --dst) ;
+ if(result.substr(dst, src-dst) == "..") {
+ // don't remove "../../"
+ src += 3;
+ continue;
+ }
+ if(dst == src)
+ // special case: "<component>" is the empty component. This means we
+ // found a leading "/../" in an absolute path, remove "/.."
+ result.erase(0, 3);
+ else {
+ // remove "<component>/../".
+ result.erase(dst, src-dst+4);
+ src = dst;
+ // try to back up one character so we are at a '/' instead of at the
+ // beginning of a component
+ if(src > 0) --src;
+ }
+ }
+
+ // absolute paths are now free of ".." components, and relative paths
+ // contain only leading ".." components
+
+ return result;
+ }
+
+ //! check whether the given path indicates that it is a directory
+ bool pathIndicatesDirectory(const std::string& p) {
+ if(p == "") return true;
+ if(p == ".") return true;
+ if(p == "..") return true;
+ if(hasSuffix(p, "/")) return true;
+ if(hasSuffix(p, "/.")) return true;
+ if(hasSuffix(p, "/..")) return true;
+ else return false;
+ }
+
+ //! pretty print path
+ std::string prettyPath(const std::string& p, bool isDirectory) {
+ std::string result = processPath(p);
+ // current directory
+ if(result == "") return ".";
+ // root directory
+ if(result == "/") return result;
+
+ // remove the trailing '/' for now
+ result.resize(result.size()-1);
+
+ // if the result ends in "..", we don't need to append '/' to make clear
+ // it's a directory
+ if(result == ".." || hasSuffix(result, "/.."))
+ return result;
+
+ // if it's a directory, tuck the '/' back on
+ if(isDirectory) result += '/';
+
+ return result;
+ }
+
+ //! pretty print path
+ std::string prettyPath(const std::string& p) {
+ return prettyPath(p, pathIndicatesDirectory(p));
+ }
+
+ //! compute a relative path between two paths
+ std::string relativePath(const std::string& newbase, const std::string& p)
+ {
+ bool absbase = hasPrefix(newbase, "/");
+ bool absp = hasPrefix(p, "/");
+ if(absbase != absp)
+ DUNE_THROW(NotImplemented, "relativePath: paths must be either both "
+ "relative or both absolute: newbase=\"" << newbase << "\" "
+ "p=\"" << p << "\"");
+
+ std::string mybase = processPath(newbase);
+ std::string myp = processPath(p);
+
+ // remove as many matching leading components as possible
+ // determine prefix length
+ std::string::size_type preflen = 0;
+ while(preflen < mybase.size() && preflen < myp.size() &&
+ mybase[preflen] == myp[preflen])
+ ++preflen;
+ // backup to the beginning of the component
+ while(preflen > 0 && myp[preflen-1] != '/')
+ --preflen;
+ mybase.erase(0, preflen);
+ myp.erase(0,preflen);
+
+ // if mybase contains leading ".." components, we're screwed
+ if(hasPrefix(mybase, "../"))
+ DUNE_THROW(NotImplemented, "relativePath: newbase has too many leading "
+ "\"..\" components: newbase=\"" << newbase << "\" "
+ "p=\"" << p << "\"");
+
+ // count the number of components in mybase
+ typedef std::iterator_traits<std::string::iterator>::difference_type
+ count_t;
+ count_t count = std::count(mybase.begin(), mybase.end(), '/');
+
+ std::string result;
+ // prefix with that many leading components
+ for(count_t i = 0; i < count; ++i)
+ result += "../";
+ // append what is left of p
+ result += myp;
+
+ return result;
+ }
+
+ /** @} group Path */
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_PATH_HH
+#define DUNE_COMMON_PATH_HH
+
+#include <string>
+
+namespace Dune {
+ /**
+ * @addtogroup Path
+ * @{
+ */
+
+ /**
+ * @file
+ * @author Jö Fahlke <jorrit@jorrit.de>
+ * @brief Utilities for handling filesystem paths
+ */
+
+ //! concatenate two paths
+ /**
+ * \param base The base path.
+ * \param p The path to concatenate onto base.
+ *
+ * If p is an absolute path, return p. Otherwise return the
+ * string-concatenation of base and p, with a '/' inserted in between if
+ * necessary.
+ *
+ * Some examples:
+ * <table>
+ * <tr><th> base </th><th> p </th><th> result </th></tr>
+ * <tr><td> anything </td><td> "/abs/path" </td><td> "/abs/path" </td></tr>
+ * <tr><td> "a" </td><td> "b" </td><td> "a/b" </td></tr>
+ * <tr><td> "/a" </td><td> "b" </td><td> "/a/b" </td></tr>
+ * <tr><td> "a/" </td><td> "b" </td><td> "a/b" </td></tr>
+ * <tr><td> "a" </td><td> "b/" </td><td> "a/b/" </td></tr>
+ * <tr><td> ".." </td><td> "b" </td><td> "../b" </td></tr>
+ * <tr><td> "a" </td><td> ".." </td><td> "a/.." </td></tr>
+ * <tr><td> "." </td><td> "b" </td><td> "./b" </td></tr>
+ * <tr><td> "a" </td><td> "." </td><td> "a/." </td></tr>
+ * <tr><td> "" </td><td> "b" </td><td> "b" </td></tr>
+ * <tr><td> "a" </td><td> "" </td><td> "a" </td></tr>
+ * <tr><td> "" </td><td> "" </td><td> "" </td></tr>
+ * </table>
+ *
+ * If both base and p are sanitized as per processPath(), and if p does not
+ * contain any leading "../", then the result will also be sanitized.
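+ *
+ * For example, the second table row in code:
+ * \code
+ * std::string p = Dune::concatPaths("a", "b"); // "a/b"
+ * \endcode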
+ */
+ std::string concatPaths(const std::string& base, const std::string& p);
+
+ //! sanitize a path for further processing
+ /**
+ * Sanitize the path as far as possible to make further processing easier.
+ * The resulting path has the following properties:
+ * <ul>
+ * <li> The path is a series of components, each followed by a single '/'.
+ * <li> An absolute path starts with an empty component followed by a '/',
+ * so its first character will be '/'. This is the only case where an
+ * empty component can occur.
+ * <li> The path does not contain any component ".". Any such component in
+ * the input is removed.
+ * <li> A ".." component may only occur in the following case: A relative
+ * path may contain a series of ".." in the beginning. Any other
+ * occurrences of ".." in the input is collapsed with a preceding
+ * component or simply removed if it is at the beginning of an absolute
+ * path.
+ * </ul>
+ *
+ * \note The result is really meant for processing only since it has two
+ * unusual properties: First, any path denoting the current directory
+ * in the input, such as "." will result in an empty path "". Second,
+ * any non-empty result path will have a trailing '/'. For other
+ * uses, prettyPath() may be more appropriate.
+ *
+ * Some examples:
+ * <table>
+ * <tr><th> p </th><th> result </th></tr>
+ * <tr><td> "" </td><td> "" </td></tr>
+ * <tr><td> "." </td><td> "" </td></tr>
+ * <tr><td> "./" </td><td> "" </td></tr>
+ * <tr><td> "a/.." </td><td> "" </td></tr>
+ * <tr><td> ".." </td><td> "../" </td></tr>
+ * <tr><td> "../a" </td><td> "../a/" </td></tr>
+ * <tr><td> "a" </td><td> "a/" </td></tr>
+ * <tr><td> "a//" </td><td> "a/" </td></tr>
+ * <tr><td> "a///b" </td><td> "a/b/" </td></tr>
+ * <tr><td> "/" </td><td> "/" </td></tr>
+ * <tr><td> "/." </td><td> "/" </td></tr>
+ * <tr><td> "/.." </td><td> "/" </td></tr>
+ * <tr><td> "/a/.." </td><td> "/" </td></tr>
+ * <tr><td> "/a" </td><td> "/a/" </td></tr>
+ * <tr><td> "/a/" </td><td> "/a/" </td></tr>
+ * <tr><td> "/../a/" </td><td> "/a/" </td></tr>
+ * </table>
+ */
+ std::string processPath(const std::string& p);
+
+ //! check whether the given path indicates that it is a directory
+ /**
+ * In particular the following kinds of paths indicate a directory:
+ * <ul>
+ * <li> The empty path (denotes the current directory),
+ * <li> any path with a trailing '/',
+ * <li> any path whose last component is "." or "..".
+ * </ul>
+ */
+ bool pathIndicatesDirectory(const std::string& p);
+
+ //! pretty print path
+ /**
+ * \param p Path to pretty-print.
+ * \param isDirectory Whether to append a '/' to make clear this is a
+ * directory.
+ *
+ * Pretty print the path. This removes any duplicate '/' and any
+ * superfluous occurrences of ".." and ".". The resulting path will have a
+ * trailing '/' if it is the root path or if isDirectory is true. It will
+ * however not have a trailing '/' if it is otherwise clear that it is a
+ * directory -- i.e. if its last component is "." or "..".
+ *
+ * Some examples:
+ * <table>
+ * <tr><th> p </th><th> isDirectory </th><th> result </th></tr>
+ * <tr><td> "" </td><td> anything </td><td> "." </td></tr>
+ * <tr><td> "." </td><td> anything </td><td> "." </td></tr>
+ * <tr><td> "./" </td><td> anything </td><td> "." </td></tr>
+ * <tr><td> "a/.." </td><td> anything </td><td> "." </td></tr>
+ * <tr><td> ".." </td><td> anything </td><td> ".." </td></tr>
+ * <tr><td> "../a" </td><td> true </td><td> "../a/" </td></tr>
+ * <tr><td> "../a" </td><td> false </td><td> "../a" </td></tr>
+ * <tr><td> "a" </td><td> true </td><td> "a/" </td></tr>
+ * <tr><td> "a" </td><td> false </td><td> "a" </td></tr>
+ * <tr><td> "a//" </td><td> true </td><td> "a/" </td></tr>
+ * <tr><td> "a//" </td><td> false </td><td> "a" </td></tr>
+ * <tr><td> "a///b" </td><td> true </td><td> "a/b/" </td></tr>
+ * <tr><td> "a///b" </td><td> false </td><td> "a/b" </td></tr>
+ * <tr><td> "/" </td><td> anything </td><td> "/" </td></tr>
+ * <tr><td> "/." </td><td> anything </td><td> "/" </td></tr>
+ * <tr><td> "/.." </td><td> anything </td><td> "/" </td></tr>
+ * <tr><td> "/a/.." </td><td> anything </td><td> "/" </td></tr>
+ * <tr><td> "/a" </td><td> true </td><td> "/a/" </td></tr>
+ * <tr><td> "/a" </td><td> false </td><td> "/a" </td></tr>
+ * <tr><td> "/a/" </td><td> true </td><td> "/a/" </td></tr>
+ * <tr><td> "/a/" </td><td> false </td><td> "/a" </td></tr>
+ * <tr><td> "/../a/" </td><td> true </td><td> "/a/" </td></tr>
+ * <tr><td> "/../a/" </td><td> false </td><td> "/a" </td></tr>
+ * </table>
+ */
+ std::string prettyPath(const std::string& p, bool isDirectory);
+
+ //! pretty print path
+ /**
+ * \param p Path to pretty-print.
+ *
+ * This is like prettyPath(const std::string& p, bool isDirectory) with
+ * isDirectory automatically determined using pathIndicatesDirectory(p).
+ */
+ std::string prettyPath(const std::string& p);
+
+ //! compute a relative path between two paths
+ /**
+ * \param newbase Base path for the resulting relative path.
+ * \param p Path the resulting path should resolve to, when taken
+ * relative to newbase.
+ *
+ * Compute a relative path from newbase to p. newbase is assumed to be a
+ * directory. p and newbase should either both be absolute, or both be
+ * relative. In the latter case they are assumed to both be relative to
+ * the same unspecified directory. The result has the form of something
+ * sanitized by processPath().
+ *
+ * \throw NotImplemented The condition that newbase and p must both be
+ * relative or both be absolute does not hold.
+ * \throw NotImplemented After sanitization newbase has more leading ".."
+ * components than p.
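+ *
+ * For instance (the result is in the processPath()-sanitized form, i.e.
+ * with a trailing '/'):
+ * \code
+ * Dune::relativePath("a/b", "a/c/d"); // yields "../c/d/"
+ * \endcode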
+ */
+ std::string relativePath(const std::string& newbase, const std::string& p);
+
+ /** @} group Path */
+}
+
+#endif // DUNE_COMMON_PATH_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_POOLALLOCATOR_HH
+#define DUNE_COMMON_POOLALLOCATOR_HH
+
+/** \file
+ * \brief An STL-compliant pool allocator
+ */
+
+#include <numeric>
+#include <typeinfo>
+#include <iostream>
+#include <cassert>
+#include <new>
+
+#ifndef DOXYGEN
+// forward declarations.
+// we need to know the test function to declare it friend
+template<std::size_t size, typename T>
+struct testPoolMain;
+#endif
+
+namespace Dune
+{
+
+ template<typename T, std::size_t s>
+ class Pool;
+
+ template<typename T, std::size_t s>
+ class PoolAllocator;
+
+}
+
+namespace std
+{
+ /*
+ template<class T, std::size_t S>
+ inline ostream& operator<<(ostream& os, Dune::Pool<T,S>& pool)
+ {
+ os<<"pool="<<&pool<<" allocated_="<<pool.allocated_;
+ return os;
+ }
+
+ template<class T, std::size_t S>
+ inline ostream& operator<<(ostream& os, Dune::PoolAllocator<T,S>& pool)
+ {
+ os<<pool.memoryPool_<<std::endl;
+ return os;
+ }
+ */
+}
+
+
+namespace Dune
+{
+ /**
+ * @file
+ * This file implements the classes Pool and PoolAllocator providing
+ * memory allocation for objects in chunks.
+ * @author Markus Blatt
+ */
+ /**
+ * @addtogroup Allocators
+ *
+ * @{
+ */
+
+ /**
+ * @brief A memory pool of objects.
+ *
+ * The memory for the objects is organized in chunks.
+ * Each chunk is capable of holding a specified number of
+ * objects. The allocated objects will be properly aligned
+ * for fast access.
+ * Deallocated objects are cached for reuse to prevent memory
+ * fragmentation.
+ * @warning If the size of the allocated objects is less than the
+ * size of a pointer, memory is wasted.
+ * @warning Due to alignment, a few bytes per chunk (fewer than the
+ * alignment requirement) may be wasted. This effect
+ * becomes negligible for large values of chunkSize.
+ *
+ * \tparam T The type that is allocated by us.
+ * \tparam s The size of a memory chunk in bytes.
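+ *
+ * A minimal sketch of direct use (the PoolAllocator below is usually the
+ * more convenient interface):
+ * \code
+ * Dune::Pool<double,256> pool; // chunks of at least 256 bytes
+ * void* mem = pool.allocate(); // memory for exactly one double
+ * double* d = new (mem) double(1.0);
+ * pool.free(d);
+ * \endcode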
+ */
+ template<class T, std::size_t s>
+ class Pool
+ {
+ // make the test function friend
+ friend struct ::testPoolMain<s,T>;
+
+ //friend std::ostream& std::operator<<<>(std::ostream&,Pool<T,s>&);
+ template< class, std::size_t > friend class PoolAllocator;
+
+ private:
+
+ /** @brief Reference to next free element. */
+ struct Reference
+ {
+ Reference *next_;
+ };
+
+ public:
+
+ /** @brief The type of object we allocate memory for. */
+ typedef T MemberType;
+ enum
+ {
+ /**
+ * @brief The size of a union of Reference and MemberType.
+ */
+ unionSize = ((sizeof(MemberType) < sizeof(Reference)) ?
+ sizeof(Reference) : sizeof(MemberType)),
+
+ /**
+ * @brief Size requirement. At least one object has to be
+ * stored.
+ */
+ size = ((sizeof(MemberType) <= s && sizeof(Reference) <= s) ?
+ s : unionSize),
+
+ /**
+ * @brief The alignment that suits both the MemberType and
+ * the Reference (i.e. the least common multiple of their alignments).
+ */
+ alignment = std::lcm(alignof(MemberType), alignof(Reference)),
+
+ /**
+ * @brief The aligned size of the type.
+ *
+ * This size is bigger than sizeof of the type and a multiple of
+ * the alignment requirement.
+ */
+ alignedSize = ((unionSize % alignment == 0) ?
+ unionSize :
+ ((unionSize / alignment + 1) * alignment)),
+
+ /**
+ * @brief The size of each memory chunk.
+ *
+ * Will be adapted to be a multiple of the alignment
+ */
+ chunkSize = ((size % alignment == 0) ?
+ size : ((size / alignment + 1)* alignment)),
+
+ /**
+ * @brief The number of elements each chunk can hold.
+ */
+ elements = (chunkSize / alignedSize)
+ };
+
+ private:
+ /** @brief Chunk of memory managed by the pool. */
+ struct Chunk
+ {
+
+ //friend int testPool<s,T>();
+
+ /** @brief The memory we hold. */
+ alignas(alignment) char chunk_[chunkSize];
+
+ /** @brief The next element */
+ Chunk *next_;
+ };
+
+ public:
+ /** @brief Constructor. */
+ inline Pool();
+ /** @brief Destructor. */
+ inline ~Pool();
+ /**
+ * @brief Get a new or recycled object
+ * @return A pointer to the object memory.
+ */
+ inline void* allocate();
+ /**
+ * @brief Free an object.
+ * @param o The pointer to memory block of the object.
+ */
+ inline void free(void* o);
+
+ /**
+ * @brief Print elements in pool for debugging.
+ */
+ inline void print(std::ostream& os);
+
+ private:
+
+ // Prevent Copying!
+ Pool(const Pool<MemberType,s>&);
+
+ void operator=(const Pool<MemberType,s>& pool) const;
+ /** @brief Grow our pool.*/
+ inline void grow();
+ /** @brief The first free element. */
+ Reference *head_;
+ /** @brief Our memory chunks. */
+ Chunk *chunks_;
+ /* @brief The number of currently allocated elements. */
+ //size_t allocated_;
+
+ };
+
+ /**
+ * @brief An allocator managing a pool of objects for reuse.
+ *
+ * This allocator is specifically useful for small data types
+ * where new and delete are too expensive.
+ *
+ * It uses a pool of memory chunks where the objects will be allocated.
+ * This means that, assuming N objects fit into one chunk, only every
+ * N-th request for an object will result in an actual memory allocation.
+ *
+ * @warning It is not suitable
+ * for use in standard containers, as it cannot allocate
+ * arrays of arbitrary size.
+ *
+ * \tparam T The type that will be allocated.
+ * \tparam s The number of elements to fit into one memory chunk.
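+ *
+ * A minimal allocation round trip (illustrative only; note that only a
+ * single object can be allocated per call):
+ * \code
+ * Dune::PoolAllocator<double,32> alloc;
+ * double* p = alloc.allocate(1);
+ * alloc.construct(p, 3.14);
+ * // ... use *p ...
+ * alloc.destroy(p);
+ * alloc.deallocate(p, 1);
+ * \endcode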
+ */
+ template<class T, std::size_t s>
+ class PoolAllocator
+ {
+ //friend std::ostream& std::operator<<<>(std::ostream&,PoolAllocator<T,s>&);
+
+ public:
+ /**
+ * @brief Type of the values we construct and allocate.
+ */
+ typedef T value_type;
+
+ enum
+ {
+ /**
+ * @brief The size in bytes of one memory chunk, chosen such that s
+ * objects fit into it.
+ */
+ size=s*sizeof(value_type)
+ };
+
+ /**
+ * @brief The pointer type.
+ */
+ typedef T* pointer;
+
+ /**
+ * @brief The constant pointer type.
+ */
+ typedef const T* const_pointer;
+
+ /**
+ * @brief The reference type.
+ */
+ typedef T& reference;
+
+ /**
+ * @brief The constant reference type.
+ */
+ typedef const T& const_reference;
+
+ /**
+ * @brief The size type.
+ */
+ typedef std::size_t size_type;
+
+ /**
+ * @brief The difference_type.
+ */
+ typedef std::ptrdiff_t difference_type;
+
+ /**
+ * @brief Constructor.
+ */
+ inline PoolAllocator();
+
+ /**
+ * @brief Copy Constructor that does not copy the memory pool.
+ */
+ template<typename U, std::size_t u>
+ inline PoolAllocator(const PoolAllocator<U,u>&)
+ {
+ // we allow copying but never copy the pool
+ // to have a clear ownership of allocated pointers.
+ }
+
+ /// \brief Copy constructor that does not copy the memory pool.
+ PoolAllocator(const PoolAllocator&)
+ {
+ // we allow copying but never copy the pool
+ // to have a clear ownership of allocated pointers.
+ // For this behaviour we have to implement
+ // the copy constructor, because the default
+ // one would copy the pool and deallocation
+ // of it would break.
+ }
+ /**
+ * @brief Allocates objects.
+ * @param n The number of objects to allocate. Has to be one!
+ * @param hint Ignored hint.
+ * @return A pointer to the allocated elements.
+ */
+ inline pointer allocate(std::size_t n, const_pointer hint=0);
+
+ /**
+ * @brief Free objects.
+ *
+ * Does not call the destructor!
+ * @param n The number of objects to free. Has to be one!
+ * @param p Pointer to the first object.
+ */
+ inline void deallocate(pointer p, std::size_t n);
+
+ /**
+ * @brief Construct an object.
+ * @param p Pointer to the object.
+ * @param value The value to initialize it to.
+ */
+ inline void construct(pointer p, const_reference value);
+
+ /**
+ * @brief Destroy an object without freeing memory.
+ * @param p Pointer to the object.
+ */
+ inline void destroy(pointer p);
+
+ /**
+ * @brief Convert a reference to a pointer.
+ */
+ inline pointer address(reference x) const { return &x; }
+
+
+ /**
+ * @brief Convert a reference to a pointer.
+ */
+ inline const_pointer address(const_reference x) const { return &x; }
+
+ /**
+ * @brief Not correctly implemented, yet!
+ */
+ inline int max_size() const noexcept { return 1; }
+
+ /**
+ * @brief Rebind the allocator to another type.
+ */
+ template<class U>
+ struct rebind
+ {
+ typedef PoolAllocator<U,s> other;
+ };
+
+ /** @brief The type of the memory pool we use. */
+ typedef Pool<T,size> PoolType;
+
+ private:
+ /**
+ * @brief The underlying memory pool.
+ */
+ PoolType memoryPool_;
+ };
+
+ // specialization for void
+ template <std::size_t s>
+ class PoolAllocator<void,s>
+ {
+ public:
+ typedef void* pointer;
+ typedef const void* const_pointer;
+ // reference to void members are impossible.
+ typedef void value_type;
+ template <class U> struct rebind
+ {
+ typedef PoolAllocator<U,s> other;
+ };
+ };
+
+
+ template<typename T1, std::size_t t1, typename T2, std::size_t t2>
+ bool operator==(const PoolAllocator<T1,t1>&, const PoolAllocator<T2,t2>&)
+ {
+ return false;
+ }
+
+
+ template<typename T1, std::size_t t1, typename T2, std::size_t t2>
+ bool operator!=(const PoolAllocator<T1,t1>&, const PoolAllocator<T2,t2>&)
+ {
+ return true;
+ }
+
+ template<typename T, std::size_t t1, std::size_t t2>
+ bool operator==(const PoolAllocator<T,t1>& p1, const PoolAllocator<T,t2>& p2)
+ {
+ return &p1==&p2;
+ }
+
+
+ template<typename T, std::size_t t1, std::size_t t2>
+ bool operator!=(const PoolAllocator<T,t1>& p1, const PoolAllocator<T,t2>& p2)
+ {
+ return &p1 != &p2;
+ }
+
+ template<typename T, std::size_t t1, std::size_t t2>
+ bool operator==(const PoolAllocator<void,t1>&, const PoolAllocator<T,t2>&)
+ {
+ return false;
+ }
+
+
+ template<typename T, std::size_t t1, std::size_t t2>
+ bool operator!=(const PoolAllocator<void,t1>&, const PoolAllocator<T,t2>&)
+ {
+ return true;
+ }
+
+ template<std::size_t t1, std::size_t t2>
+ bool operator==(const PoolAllocator<void,t1>& p1, const PoolAllocator<void,t2>& p2)
+ {
+ return &p1==&p2;
+ }
+
+ template<std::size_t t1, std::size_t t2>
+ bool operator!=(const PoolAllocator<void,t1>& p1, const PoolAllocator<void,t2>& p2)
+ {
+ return &p1!=&p2;
+ }
+
+ template<class T, std::size_t S>
+ inline Pool<T,S>::Pool()
+ : head_(0), chunks_(0) //, allocated_(0)
+ {
+ static_assert(sizeof(T)<=unionSize, "Library Error: type T is too big");
+ static_assert(sizeof(Reference)<=unionSize, "Library Error: type of reference is too big");
+ static_assert(unionSize<=alignedSize, "Library Error: alignedSize too small");
+ static_assert(sizeof(T)<=chunkSize, "Library Error: chunkSize must be able to hold at least one value");
+ static_assert(sizeof(Reference)<=chunkSize, "Library Error: chunkSize must be able to hold at least one reference");
+ static_assert(chunkSize % alignment == 0, "Library Error: compiler cannot calculate!");
+ static_assert(elements>=1, "Library Error: we need to hold at least one element!");
+ static_assert(elements*alignedSize<=chunkSize, "Library Error: aligned elements must fit into chunk!");
+ }
+
+ template<class T, std::size_t S>
+ inline Pool<T,S>::~Pool()
+ {
+ /*
+ if(allocated_!=0)
+ std::cerr<<"There are still "<<allocated_<<" allocated elements by the Pool<"<<typeid(T).name()<<","<<S<<"> "
+ <<static_cast<void*>(this)<<"! This is a memory leak and might result in segfaults"
+ <<std::endl;
+ */
+ // delete the allocated chunks.
+ Chunk *current=chunks_;
+
+ while(current!=0)
+ {
+ Chunk *tmp = current;
+ current = current->next_;
+ delete tmp;
+ }
+ }
+
+ template<class T, std::size_t S>
+ inline void Pool<T,S>::print(std::ostream& os)
+ {
+ Chunk* current=chunks_;
+ while(current) {
+ os<<current<<" ";
+ current=current->next_;
+ }
+ os<<current<<" ";
+ }
+
+ template<class T, std::size_t S>
+ inline void Pool<T,S>::grow()
+ {
+ Chunk *newChunk = new Chunk;
+ newChunk->next_ = chunks_;
+ chunks_ = newChunk;
+
+ char* start = chunks_->chunk_;
+ char* last = &start[elements*alignedSize];
+ Reference* ref = new (start) (Reference);
+
+ // grow is only called if head==0,
+ assert(!head_);
+
+ head_ = ref;
+
+ for(char* element=start+alignedSize; element<last; element=element+alignedSize) {
+ Reference* next = new (element) (Reference);
+ ref->next_ = next;
+ ref = next;
+ }
+ ref->next_=0;
+ }
+
+ template<class T, std::size_t S>
+ inline void Pool<T,S>::free(void* b)
+ {
+ if(b) {
+#ifndef NDEBUG
+ Chunk* current=chunks_;
+ while(current) {
+ if(static_cast<void*>(current->chunk_)<=b &&
+ static_cast<void*>(current->chunk_+chunkSize)>b)
+ break;
+ current=current->next_;
+ }
+ if(!current)
+ throw std::bad_alloc();
+#endif
+ Reference* freed = static_cast<Reference*>(b);
+ freed->next_ = head_;
+ head_ = freed;
+ //--allocated_;
+ }
+ else
+ {
+ std::cerr<< "Tried to free null pointer! "<<b<<std::endl;
+ throw std::bad_alloc();
+ }
+ }
+
+ template<class T, std::size_t S>
+ inline void* Pool<T,S>::allocate()
+ {
+ if(!head_)
+ grow();
+
+ Reference* p = head_;
+ head_ = p->next_;
+ //++allocated_;
+ return p;
+ }
+
+ template<class T, std::size_t s>
+ inline PoolAllocator<T,s>::PoolAllocator()
+ { }
+
+ template<class T, std::size_t s>
+ inline typename PoolAllocator<T,s>::pointer
+ PoolAllocator<T,s>::allocate(std::size_t n, const_pointer)
+ {
+ if(n==1)
+ return static_cast<T*>(memoryPool_.allocate());
+ else
+ throw std::bad_alloc();
+ }
+
+ template<class T, std::size_t s>
+ inline void PoolAllocator<T,s>::deallocate(pointer p, std::size_t n)
+ {
+ for(size_t i=0; i<n; i++)
+ memoryPool_.free(p++);
+ }
+
+ template<class T, std::size_t s>
+ inline void PoolAllocator<T,s>::construct(pointer p, const_reference value)
+ {
+ ::new (static_cast<void*>(p))T(value);
+ }
+
+ template<class T, std::size_t s>
+ inline void PoolAllocator<T,s>::destroy(pointer p)
+ {
+ p->~T();
+ }
+
+ /** @} */
+}
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_POWER_HH
+#define DUNE_COMMON_POWER_HH
+
+/** \file
+ \brief Various implementations of the power function for run-time and static arguments
+ */
+
+#include <dune/common/math.hh>
+
+namespace Dune {
+
+ /** @addtogroup Common
+
+ @{
+ */
+
+ /** \brief Calculates m^p at compile time
+ * \deprecated Please use the method `power` from `math.hh` instead!
+ */
+ template <int m, int p>
+ struct StaticPower
+ {
+ /** \brief power stores m^p */
+ static constexpr int power = Dune::power(m,p);
+ };
+
+
+ /** \brief Compute power for a run-time base and a compile-time integer exponent
+ *
+ * \deprecated Please use the method `power` from `math.hh` instead!
+ *
+ * \tparam p The exponent
+ */
+ template <int p>
+ struct Power
+ {
+ template <typename T>
+ static constexpr auto eval(const T & a)
+ {
+ return power(a,p);
+ }
+ };
+
+}
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_PRECISION_HH
+#define DUNE_PRECISION_HH
+
+/** \file
+ * \brief Various precision settings for calculations with FieldMatrix and FieldVector
+ */
+
+#include <stdlib.h>
+
+namespace Dune {
+
+ /**
+ @addtogroup DenseMatVec
+ @{
+ */
+
+ /**
+ * @brief Precisions for calculations with FieldMatrix and FieldVector.
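+ *
+ * The threshold is a static, process-wide setting; the value below is
+ * chosen arbitrarily for illustration:
+ * \code
+ * Dune::FMatrixPrecision<>::set_absolute_limit(1e-30);
+ * double eps = Dune::FMatrixPrecision<>::absolute_limit();
+ * \endcode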
+ */
+ template <class ctype = double>
+ class FMatrixPrecision {
+ public:
+ //! return threshold to declare matrix singular
+ static ctype absolute_limit ()
+ {
+ return _absolute;
+ }
+
+ //! set singular threshold
+ static void set_absolute_limit (ctype absthres)
+ {
+ _absolute = absthres;
+ }
+
+ private:
+ // just to demonstrate some state information
+ static ctype _absolute;
+ };
+
+ template <class ctype>
+ ctype FMatrixPrecision<ctype>::_absolute = 1E-80;
+
+ /** @} end documentation */
+
+} // end namespace
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_PROMOTIONTRAITS_HH
+#define DUNE_PROMOTIONTRAITS_HH
+
+#include <utility>
+
+namespace Dune {
+ /**
+ * @file
+ * @brief Compute type of the result of an arithmetic operation involving two different number types.
+ *
+ * @author Matthias Wohlmuth
+ */
+
+ /** @addtogroup Common
+ *
+ * @{
+ */
+
+ /** \brief Compute type of the result of an arithmetic operation involving two different number types.
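+ *
+ * For example, the promoted type of int and double is double
+ * (the check below additionally assumes <type_traits> is included):
+ * \code
+ * static_assert(std::is_same<
+ * Dune::PromotionTraits<int,double>::PromotedType, double>::value, "");
+ * \endcode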
+ */
+ template <typename T1, typename T2>
+ struct PromotionTraits
+ {
+ typedef decltype(std::declval<T1>()+std::declval<T2>()) PromotedType;
+ };
+
+ // Specialization for the case of two equal types
+ // One should think that the generic template should handle this case as well.
+ // However, the fvectortest.cc unit test fails without it if ENABLE_GMP is set.
+ template <typename T1>
+ struct PromotionTraits<T1,T1> { typedef T1 PromotedType; };
+
+ /** @} */
+} // end namespace
+
+
+#endif // DUNE_PROMOTIONTRAITS_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_PROPERTYMAP_HH
+#define DUNE_PROPERTYMAP_HH
+
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+
+namespace Dune
+{
+
+ template<class PM>
+ struct PropertyMapTraits
+ {
+ /**
+ * @brief The type of the key of the property map.
+ */
+ typedef typename PM::KeyType KeyType;
+ /**
+ * @brief The type of the values of the property map.
+ */
+ typedef typename PM::ValueType ValueType;
+ /**
+ * @brief The type of the reference to the values.
+ */
+ typedef typename PM::Reference Reference;
+ /**
+ * @brief The category the property map belongs to.
+ */
+ typedef typename PM::Category Category;
+ };
+
+ /** @brief Tag for the category of readable property maps. */
+ struct ReadablePropertyMapTag
+ {};
+
+ /** @brief Tag for the category of writable property maps. */
+ struct WritablePropertyMapTag
+ {};
+
+ /**
+ * @brief Tag for the category of readable and writable property
+ * maps.
+ */
+ struct ReadWritePropertyMapTag
+ : public ReadablePropertyMapTag, public WritablePropertyMapTag
+ {};
+
+ /**
+ * @brief Tag for the category of lvalue property maps.
+ */
+ struct LvaluePropertyMapTag
+ : public ReadWritePropertyMapTag
+ {};
+
+ template<class T>
+ struct PropertyMapTraits<T*>
+ {
+ typedef T ValueType;
+ typedef ValueType& Reference;
+ typedef std::ptrdiff_t KeyType;
+ typedef LvaluePropertyMapTag Category;
+ };
+
+
+ template<class T>
+ struct PropertyMapTraits<const T*>
+ {
+ typedef T ValueType;
+ typedef const ValueType& Reference;
+ typedef std::ptrdiff_t KeyType;
+ typedef LvaluePropertyMapTag Category;
+ };
+
+ template<class Reference, class PropertyMap>
+ struct RAPropertyMapHelper
+ {};
+
+ template<class Reference, class PropertyMap, class Key>
+ inline Reference
+ get(const RAPropertyMapHelper<Reference,PropertyMap>& pmap,
+ const Key& key)
+ {
+ return static_cast<const PropertyMap&>(pmap)[key];
+ }
+
+ template<class Reference, class PropertyMap, class Key, class Value>
+ inline void
+ put(const RAPropertyMapHelper<Reference,PropertyMap>& pmap,
+ const Key& key, const Value& value)
+ {
+ static_assert(std::is_convertible<typename PropertyMap::Category,WritablePropertyMapTag>::value,
+ "WritablePropertyMapTag required!");
+ static_cast<const PropertyMap&>(pmap)[key] = value;
+ }
+
+ /**
+ * @brief Adapter to turn a random access iterator into a property map.
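+ *
+ * A small usage sketch with a std::vector and the IdentityMap defined
+ * further below as index map:
+ * \code
+ * std::vector<double> data(10, 0.0);
+ * Dune::IteratorPropertyMap<std::vector<double>::iterator, Dune::IdentityMap>
+ * pmap(data.begin(), Dune::IdentityMap());
+ * put(pmap, 3, 42.0); // writes data[3]
+ * double v = get(pmap, 3); // reads data[3]
+ * \endcode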
+ */
+ template<class RAI, class IM,
+ class T = typename std::iterator_traits<RAI>::value_type,
+ class R = typename std::iterator_traits<RAI>::reference>
+ class IteratorPropertyMap
+ : public RAPropertyMapHelper<R,IteratorPropertyMap<RAI,IM,T,R> >
+ {
+ public:
+ /**
+ * @brief The type of the random access iterator.
+ */
+ typedef RAI RandomAccessIterator;
+
+ /**
+ * @brief The type of the index map.
+ *
+ * This will convert the KeyType to std::ptrdiff_t via operator[]().
+ */
+ typedef IM IndexMap;
+
+ /**
+ * @brief The key type of the property map.
+ */
+ typedef typename IndexMap::KeyType KeyType;
+
+ /**
+ * @brief The value type of the property map.
+ */
+ typedef T ValueType;
+
+ /**
+ * @brief The reference type of the property map.
+ */
+ typedef R Reference;
+
+ /**
+ * @brief The category of this property map.
+ */
+ typedef LvaluePropertyMapTag Category;
+
+ /**
+ * @brief Constructor.
+ * @param iter The random access iterator that
+ * provides the mapping.
+ * @param im The index map that maps the KeyType
+ * to the difference_type of the iterator.
+ */
+ inline IteratorPropertyMap(RandomAccessIterator iter,
+ const IndexMap& im=IndexMap())
+ : iter_(iter), indexMap_(im)
+ {}
+
+ /** @brief Constructor. */
+ inline IteratorPropertyMap()
+ : iter_(), indexMap_()
+ {}
+
+ /** @brief Access a value by reference. */
+ inline Reference operator[](KeyType key) const
+ {
+ return *(iter_ + get(indexMap_, key));
+ }
+
+ private:
+ /** @brief The underlying iterator. */
+ RandomAccessIterator iter_;
+ /** @brief The index map to use for the lookup. */
+ IndexMap indexMap_;
+ };
+
+ /**
+ * @brief An adapter to turn a unique associative container
+ * into a property map.
+ */
+ template<typename T>
+ class AssociativePropertyMap
+ : RAPropertyMapHelper<typename T::value_type::second_type&,
+ AssociativePropertyMap<T> >
+ {
+ /**
+ * @brief The type of the unique associative container.
+ */
+ typedef T UniqueAssociativeContainer;
+
+ /**
+ * @brief The key type of the property map.
+ */
+ typedef typename UniqueAssociativeContainer::value_type::first_type
+ KeyType;
+
+ /**
+ * @brief The value type of the property map.
+ */
+ typedef typename UniqueAssociativeContainer::value_type::second_type
+ ValueType;
+
+ /**
+ * @brief The reference type of the property map.
+ */
+ typedef ValueType& Reference;
+
+ /**
+ * @brief The category of the property map.
+ */
+ typedef LvaluePropertyMapTag Category;
+
+ /** @brief Constructor */
+ inline AssociativePropertyMap()
+ : map_(0)
+ {}
+
+ /** @brief Constructor. */
+ inline AssociativePropertyMap(UniqueAssociativeContainer& map)
+ : map_(&map)
+ {}
+
+ /**
+ * @brief Access a property.
+ * @param key The key of the property.
+ */
+ inline Reference operator[](KeyType key) const
+ {
+ return map_->find(key)->second;
+ }
+ private:
+ UniqueAssociativeContainer* map_;
+ };
+
+ /**
+ * @brief An adaptor to turn a unique associative container
+ * into a property map.
+ */
+ template<typename T>
+ class ConstAssociativePropertyMap
+ : RAPropertyMapHelper<const typename T::value_type::second_type&,
+ ConstAssociativePropertyMap<T> >
+ {
+ /**
+ * @brief The type of the unique associative container.
+ */
+ typedef T UniqueAssociativeContainer;
+
+ /**
+ * @brief The key type of the property map.
+ */
+ typedef typename UniqueAssociativeContainer::value_type::first_type
+ KeyType;
+
+ /**
+ * @brief The value type of the property map.
+ */
+ typedef typename UniqueAssociativeContainer::value_type::second_type
+ ValueType;
+
+ /**
+ * @brief The reference type of the property map.
+ */
+ typedef const ValueType& Reference;
+
+ /**
+ * @brief The category of the property map.
+ */
+ typedef LvaluePropertyMapTag Category;
+
+ /** @brief Constructor */
+ inline ConstAssociativePropertyMap()
+ : map_(0)
+ {}
+
+ /** @brief Constructor. */
+ inline ConstAssociativePropertyMap(const UniqueAssociativeContainer& map)
+ : map_(&map)
+ {}
+
+ /**
+ * @brief Access a property.
+ * @param key The key of the property.
+ */
+ inline Reference operator[](KeyType key) const
+ {
+ return map_->find(key)->second;
+ }
+ private:
+ const UniqueAssociativeContainer* map_;
+ };
+
+ /**
+ * @brief A property map that applies the identity function to integers.
+ */
+ struct IdentityMap
+ : public RAPropertyMapHelper<std::size_t, IdentityMap>
+ {
+ /** @brief The key type of the map. */
+ typedef std::size_t KeyType;
+
+ /** @brief The value type of the map. */
+ typedef std::size_t ValueType;
+
+ /** @brief The reference type of the map. */
+ typedef std::size_t Reference;
+
+ /** @brief The category of the map. */
+ typedef ReadablePropertyMapTag Category;
+
+ inline ValueType operator[](const KeyType& key) const
+ {
+ return key;
+ }
+ };
+
+
+ /**
+ * @brief Selector for the property map type.
+ *
+ * If present the type of the property map is accessible via the typedef Type.
+ */
+ template<typename T, typename C>
+ struct PropertyMapTypeSelector
+ {
+ /**
+ * @brief the tag identifying the property.
+ */
+ typedef T Tag;
+ /**
+ * @brief The container type to whose entries the properties
+ * are attached.
+ */
+ typedef C Container;
+ };
+
+}
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_PROXYMEMBERACCESS_HH
+#define DUNE_COMMON_PROXYMEMBERACCESS_HH
+
+/**
+ * \file
+ * \brief infrastructure for supporting operator->() on both references and proxies
+ * \ingroup CxxUtilities
+ */
+
+#include <type_traits>
+#include <utility>
+
+namespace Dune {
+
+ namespace Impl {
+
+ // helper struct to store a temporary / proxy
+ // for the duration of the member access
+ template<typename T>
+ struct member_access_proxy_holder
+ {
+
+ // only support moving the temporary into the holder object
+ member_access_proxy_holder(T&& t)
+ : _t(std::move(t))
+ {}
+
+ // The object is fundamentally a temporary, i.e. an rvalue, so
+ // we only hand out a const pointer to the stored copy.
+ const T* operator->() const
+ {
+ return &_t;
+ }
+
+ T _t;
+
+ };
+
+ } // end Impl namespace
+
+
+#ifdef DOXYGEN
+
+ //! Transparent support for providing member access to both lvalues and rvalues (temporary proxies).
+ /**
+ * If an iterator facade (like entity iterators) wants to allow the embedded implementation to
+ * return either an (internally stored) reference or a temporary object and expose these two
+ * behaviors to enable performance optimizations, operator->() needs special handling: If the
+ * implementation returns a reference, operator->() in the facade can simply return the address
+ * of the referenced object, but if the returned object is a temporary, we need to capture and
+ * store it in a helper object to make sure it outlives the member access. This function transparently
+ * supports both variants. It should be used like this:
+ *
+ * \code
+ * class iterator
+ * {
+ * ...
+ *
+ * decltype(handle_proxy_member_access(implementation.dereference()))
+ * operator->() const
+ * {
+ * return handle_proxy_member_access(implementation.dereference());
+ * }
+ *
+ * ...
+ * };
+ * \endcode
+ *
+ * \note This function exploits the special type deduction rules for unqualified rvalue references
+ * to distinguish between lvalues and rvalues and thus needs to be passed the object returned
+ * by the implementation.
+ *
+ * \ingroup CxxUtilities
+ */
+ template<typename T>
+ pointer_or_proxy_holder
+ handle_proxy_member_access(T&& t);
+
+#else // DOXYGEN
+
+
+ // This version matches lvalues (the C++ type deduction rules state that
+ // the T&& signature deduces to a reference iff the argument is an lvalue).
+ // As the argument is an lvalue, we do not have to worry about its lifetime
+ // and can just return its address.
+ template<typename T>
+ inline typename std::enable_if<
+ std::is_lvalue_reference<T>::value,
+ typename std::add_pointer<
+ typename std::remove_reference<
+ T
+ >::type
+ >::type
+ >::type
+ handle_proxy_member_access(T&& target)
+ {
+ return ⌖
+ }
+
+ // This version matches rvalues (the C++ type deduction rules state that
+ // the T&& signature deduces to a non-reference iff the argument is an rvalue).
+ // In this case, we have to capture the rvalue in a new object to make sure it
+ // is kept alive for the duration of the member access. For this purpose, we move
+ // it into a member_access_proxy_holder instance.
+ template<typename T>
+ inline typename std::enable_if<
+ !std::is_lvalue_reference<T>::value,
+ Impl::member_access_proxy_holder<T>
+ >::type
+ handle_proxy_member_access(T&& target)
+ {
+ return {std::forward<T>(target)};
+ }
+
+#endif // DOXYGEN
+
+} // namespace Dune
+
+#endif // DUNE_COMMON_PROXYMEMBERACCESS_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_QUADMATH_HH
+#define DUNE_QUADMATH_HH
+
+#if HAVE_QUADMATH
+#include <quadmath.h>
+
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib> // abs
+#include <istream>
+#include <ostream>
+#include <type_traits>
+#include <utility>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/typetraits.hh>
+
+namespace Dune
+{
+ namespace Impl
+ {
+ // forward declaration
+ class Float128;
+
+ } // end namespace Impl
+
+ using Impl::Float128;
+
+ // The purpose of this namespace is to move the `<cmath>` function overloads
+ // out of namespace `Dune`, see AlignedNumber in debugalign.hh.
+ namespace Impl
+ {
+ using float128_t = __float128;
+
+ /// Wrapper for quad-precision type __float128
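+ /// A short usage sketch (only meaningful when HAVE_QUADMATH is set):
+ /// \code
+ /// Dune::Float128 x = 2.0;
+ /// Dune::Float128 y = sqrt(x) + 1; // uses the overloads defined below
+ /// std::cout << y << std::endl;
+ /// \endcode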
+ class Float128
+ {
+ float128_t value_ = 0.0q;
+
+ public:
+ constexpr Float128() = default;
+ constexpr Float128(const float128_t& value) noexcept
+ : value_(value)
+ {}
+
+ // constructor from any floating-point or integer type
+ template <class T,
+ std::enable_if_t<std::is_arithmetic<T>::value, int> = 0>
+ constexpr Float128(const T& value) noexcept
+ : value_(value)
+ {}
+
+ // constructor from pointer to null-terminated byte string
+ explicit Float128(const char* str) noexcept
+ : value_(strtoflt128(str, NULL))
+ {}
+
+ // accessors
+ constexpr operator float128_t() const noexcept { return value_; }
+
+ constexpr float128_t const& value() const noexcept { return value_; }
+ constexpr float128_t& value() noexcept { return value_; }
+
+ // I/O
+ template<class CharT, class Traits>
+ friend std::basic_istream<CharT, Traits>&
+ operator>>(std::basic_istream<CharT, Traits>& in, Float128& x)
+ {
+ std::string buf;
+ buf.reserve(128);
+ in >> buf;
+ x.value() = strtoflt128(buf.c_str(), NULL);
+ return in;
+ }
+
+ template<class CharT, class Traits>
+ friend std::basic_ostream<CharT, Traits>&
+ operator<<(std::basic_ostream<CharT, Traits>& out, const Float128& x)
+ {
+ const std::size_t bufSize = 128;
+ CharT buf[128];
+
+ std::string format = "%." + std::to_string(out.precision()) + "Q" +
+ ((out.flags() & std::ios_base::scientific) ? "e" : "f");
+ const int numChars = quadmath_snprintf(buf, bufSize, format.c_str(), x.value());
+ if (std::size_t(numChars) >= bufSize) {
+ DUNE_THROW(Dune::RangeError, "Failed to print Float128 value: buffer overflow");
+ }
+ out << buf;
+ return out;
+ }
+
+ // Increment, decrement
+ constexpr Float128& operator++() noexcept { ++value_; return *this; }
+ constexpr Float128& operator--() noexcept { --value_; return *this; }
+
+ constexpr Float128 operator++(int) noexcept { Float128 tmp{*this}; ++value_; return tmp; }
+ constexpr Float128 operator--(int) noexcept { Float128 tmp{*this}; --value_; return tmp; }
+
+ // unary operators
+ constexpr Float128 operator+() const noexcept { return Float128{+value_}; }
+ constexpr Float128 operator-() const noexcept { return Float128{-value_}; }
+
+ // assignment operators
+#define DUNE_ASSIGN_OP(OP) \
+ constexpr Float128& operator OP(const Float128& u) noexcept \
+ { \
+ value_ OP float128_t(u); \
+ return *this; \
+ } \
+ static_assert(true, "Require semicolon to unconfuse editors")
+
+ DUNE_ASSIGN_OP(+=);
+ DUNE_ASSIGN_OP(-=);
+
+ DUNE_ASSIGN_OP(*=);
+ DUNE_ASSIGN_OP(/=);
+
+#undef DUNE_ASSIGN_OP
+
+ }; // end class Float128
+
+ // binary operators:
+ // For symmetry provide overloads with arithmetic types
+ // in the first or second argument.
+#define DUNE_BINARY_OP(OP) \
+ constexpr Float128 operator OP(const Float128& t, \
+ const Float128& u) noexcept \
+ { \
+ return Float128{float128_t(t) OP float128_t(u)}; \
+ } \
+ constexpr Float128 operator OP(const float128_t& t, \
+ const Float128& u) noexcept \
+ { \
+ return Float128{t OP float128_t(u)}; \
+ } \
+ constexpr Float128 operator OP(const Float128& t, \
+ const float128_t& u) noexcept \
+ { \
+ return Float128{float128_t(t) OP u}; \
+ } \
+ template <class T, \
+ std::enable_if_t<std::is_arithmetic<T>::value, int> = 0> \
+ constexpr Float128 operator OP(const T& t, \
+ const Float128& u) noexcept \
+ { \
+ return Float128{float128_t(t) OP float128_t(u)}; \
+ } \
+ template <class U, \
+ std::enable_if_t<std::is_arithmetic<U>::value, int> = 0> \
+ constexpr Float128 operator OP(const Float128& t, \
+ const U& u) noexcept \
+ { \
+ return Float128{float128_t(t) OP float128_t(u)}; \
+ } \
+ static_assert(true, "Require semicolon to unconfuse editors")
+
+ DUNE_BINARY_OP(+);
+ DUNE_BINARY_OP(-);
+ DUNE_BINARY_OP(*);
+ DUNE_BINARY_OP(/);
+
+#undef DUNE_BINARY_OP
+
+ // logical operators:
+ // For symmetry provide overloads with arithmetic types
+ // in the first or second argument.
+#define DUNE_BINARY_BOOL_OP(OP) \
+ constexpr bool operator OP(const Float128& t, \
+ const Float128& u) noexcept \
+ { \
+ return float128_t(t) OP float128_t(u); \
+ } \
+ template <class T, \
+ std::enable_if_t<std::is_arithmetic<T>::value, int> = 0> \
+ constexpr bool operator OP(const T& t, \
+ const Float128& u) noexcept \
+ { \
+ return float128_t(t) OP float128_t(u); \
+ } \
+ template <class U, \
+ std::enable_if_t<std::is_arithmetic<U>::value, int> = 0> \
+ constexpr bool operator OP(const Float128& t, \
+ const U& u) noexcept \
+ { \
+ return float128_t(t) OP float128_t(u); \
+ } \
+ static_assert(true, "Require semicolon to unconfuse editors")
+
+ DUNE_BINARY_BOOL_OP(==);
+ DUNE_BINARY_BOOL_OP(!=);
+ DUNE_BINARY_BOOL_OP(<);
+ DUNE_BINARY_BOOL_OP(>);
+ DUNE_BINARY_BOOL_OP(<=);
+ DUNE_BINARY_BOOL_OP(>=);
+
+#undef DUNE_BINARY_BOOL_OP
+
+ // Overloads for the cmath functions
+
+ // function with name `name` redirects to quadmath function `func`
+#define DUNE_UNARY_FUNC(name,func) \
+ inline Float128 name(const Float128& u) noexcept \
+ { \
+ return Float128{func (float128_t(u))}; \
+ } \
+ static_assert(true, "Require semicolon to unconfuse editors")
+
+ // like DUNE_UNARY_FUNC but with custom return type
+#define DUNE_CUSTOM_UNARY_FUNC(type,name,func) \
+ inline type name(const Float128& u) noexcept \
+ { \
+ return (type)(func (float128_t(u))); \
+ } \
+ static_assert(true, "Require semicolon to unconfuse editors")
+
+ // redirects to quadmath function with two arguments
+#define DUNE_BINARY_FUNC(name,func) \
+ inline Float128 name(const Float128& t, \
+ const Float128& u) noexcept \
+ { \
+ return Float128{func (float128_t(t), float128_t(u))}; \
+ } \
+ static_assert(true, "Require semicolon to unconfuse editors")
+
+ DUNE_UNARY_FUNC(abs, fabsq);
+ DUNE_UNARY_FUNC(acos, acosq);
+ DUNE_UNARY_FUNC(acosh, acoshq);
+ DUNE_UNARY_FUNC(asin, asinq);
+ DUNE_UNARY_FUNC(asinh, asinhq);
+ DUNE_UNARY_FUNC(atan, atanq);
+ DUNE_UNARY_FUNC(atanh, atanhq);
+ DUNE_UNARY_FUNC(cbrt, cbrtq);
+ DUNE_UNARY_FUNC(ceil, ceilq);
+ DUNE_UNARY_FUNC(cos, cosq);
+ DUNE_UNARY_FUNC(cosh, coshq);
+ DUNE_UNARY_FUNC(erf, erfq);
+ DUNE_UNARY_FUNC(erfc, erfcq);
+ DUNE_UNARY_FUNC(exp, expq);
+ DUNE_UNARY_FUNC(expm1, expm1q);
+ DUNE_UNARY_FUNC(fabs, fabsq);
+ DUNE_UNARY_FUNC(floor, floorq);
+ DUNE_CUSTOM_UNARY_FUNC(int, ilogb, ilogbq);
+ DUNE_UNARY_FUNC(lgamma, lgammaq);
+ DUNE_CUSTOM_UNARY_FUNC(long long int, llrint, llrintq);
+ DUNE_CUSTOM_UNARY_FUNC(long long int, llround, llroundq);
+ DUNE_UNARY_FUNC(log, logq);
+ DUNE_UNARY_FUNC(log10, log10q);
+ DUNE_UNARY_FUNC(log1p, log1pq);
+ DUNE_UNARY_FUNC(log2, log2q);
+ // DUNE_UNARY_FUNC(logb, logbq); // not available in gcc5
+ DUNE_CUSTOM_UNARY_FUNC(long int, lrint, lrintq);
+ DUNE_CUSTOM_UNARY_FUNC(long int, lround, lroundq);
+ DUNE_UNARY_FUNC(nearbyint, nearbyintq);
+ DUNE_BINARY_FUNC(nextafter, nextafterq);
+ DUNE_BINARY_FUNC(pow, powq); // overload for integer argument see below
+ DUNE_UNARY_FUNC(rint, rintq);
+ DUNE_UNARY_FUNC(round, roundq);
+ DUNE_UNARY_FUNC(sin, sinq);
+ DUNE_UNARY_FUNC(sinh, sinhq);
+ DUNE_UNARY_FUNC(sqrt, sqrtq);
+ DUNE_UNARY_FUNC(tan, tanq);
+ DUNE_UNARY_FUNC(tanh, tanhq);
+ DUNE_UNARY_FUNC(tgamma, tgammaq);
+ DUNE_UNARY_FUNC(trunc, truncq);
+
+ DUNE_CUSTOM_UNARY_FUNC(bool, isfinite, finiteq);
+ DUNE_CUSTOM_UNARY_FUNC(bool, isinf, isinfq);
+ DUNE_CUSTOM_UNARY_FUNC(bool, isnan, isnanq);
+ DUNE_CUSTOM_UNARY_FUNC(bool, signbit, signbitq);
+
+#undef DUNE_UNARY_FUNC
+#undef DUNE_CUSTOM_UNARY_FUNC
+#undef DUNE_BINARY_FUNC
+
+ // like DUNE_BINARY_FUNC but provide overloads with arithmetic
+ // types in the first or second argument.
+#define DUNE_BINARY_ARITHMETIC_FUNC(name,func) \
+ inline Float128 name(const Float128& t, \
+ const Float128& u) noexcept \
+ { \
+ return Float128{func (float128_t(t), float128_t(u))}; \
+ } \
+ template <class T, \
+ std::enable_if_t<std::is_arithmetic<T>::value, int> = 0> \
+ inline Float128 name(const T& t, \
+ const Float128& u) noexcept \
+ { \
+ return Float128{func (float128_t(t), float128_t(u))}; \
+ } \
+ template <class U, \
+ std::enable_if_t<std::is_arithmetic<U>::value, int> = 0> \
+ inline Float128 name(const Float128& t, \
+ const U& u) noexcept \
+ { \
+ return Float128{func (float128_t(t), float128_t(u))}; \
+ } \
+ static_assert(true, "Require semicolon to unconfuse editors")
+
+ DUNE_BINARY_ARITHMETIC_FUNC(atan2,atan2q);
+ DUNE_BINARY_ARITHMETIC_FUNC(copysign,copysignq);
+ DUNE_BINARY_ARITHMETIC_FUNC(fdim,fdimq);
+ DUNE_BINARY_ARITHMETIC_FUNC(fmax,fmaxq);
+ DUNE_BINARY_ARITHMETIC_FUNC(fmin,fminq);
+ DUNE_BINARY_ARITHMETIC_FUNC(fmod,fmodq);
+ DUNE_BINARY_ARITHMETIC_FUNC(hypot,hypotq);
+ DUNE_BINARY_ARITHMETIC_FUNC(remainder,remainderq);
+
+#undef DUNE_BINARY_ARITHMETIC_FUNC
+
+ // some more cmath functions with special signature
+
+ inline Float128 fma(const Float128& t, const Float128& u, const Float128& v)
+ {
+ return Float128{fmaq(float128_t(t),float128_t(u),float128_t(v))};
+ }
+
+ inline Float128 frexp(const Float128& u, int* p)
+ {
+ return Float128{frexpq(float128_t(u), p)};
+ }
+
+ inline Float128 ldexp(const Float128& u, int p)
+ {
+ return Float128{ldexpq(float128_t(u), p)};
+ }
+
+ inline Float128 remquo(const Float128& t, const Float128& u, int* quo)
+ {
+ return Float128{remquoq(float128_t(t), float128_t(u), quo)};
+ }
+
+ inline Float128 scalbln(const Float128& u, long int e)
+ {
+ return Float128{scalblnq(float128_t(u), e)};
+ }
+
+ inline Float128 scalbn(const Float128& u, int e)
+ {
+ return Float128{scalbnq(float128_t(u), e)};
+ }
+
+ /// \brief Overload of `pow` function for integer exponents.
+ // NOTE: This is much faster than a pow(x, Float128(p)) call
+ // NOTE: This is a modified version of boost::math::cstdfloat::detail::pown
+ // (adapted to the type Float128) that is part of the Boost 1.65 Math toolkit 2.8.0
+ // and is implemented by Christopher Kormanyos, John Maddock, and Paul A. Bristow,
+ // distributed under the Boost Software License, Version 1.0
+ // (See http://www.boost.org/LICENSE_1_0.txt)
+ template <class Int,
+ std::enable_if_t<std::is_integral<Int>::value, int> = 0>
+ inline Float128 pow(const Float128& x, const Int p)
+ {
+ static const Float128 max_value = FLT128_MAX;
+ static const Float128 min_value = FLT128_MIN;
+ static const Float128 inf_value = float128_t{1} / float128_t{0};
+
+ const bool isneg = (x < 0);
+ const bool isnan = (x != x);
+ const bool isinf = (isneg ? bool(-x > max_value) : bool(+x > max_value));
+
+ if (isnan) { return x; }
+ if (isinf) { return Float128{nanq("")}; }
+
+ const Float128 abs_x = (isneg ? -x : x);
+ if (p < Int(0)) {
+ if (abs_x < min_value)
+ return (isneg ? -inf_value : +inf_value);
+ else
+ return Float128(1) / pow(x, Int(-p));
+ }
+
+ if (p == Int(0)) { return Float128(1); }
+ if (p == Int(1)) { return x; }
+ if (abs_x > max_value)
+ return (isneg ? -inf_value : +inf_value);
+
+ if (p == Int(2)) { return (x * x); }
+ if (p == Int(3)) { return ((x * x) * x); }
+ if (p == Int(4)) { const Float128 x2 = (x * x); return (x2 * x2); }
+
+ Float128 result = ((p % Int(2)) != Int(0)) ? x : Float128(1);
+ Float128 xn = x; // binary powers of x
+
+ Int p2 = p;
+ while (Int(p2 /= 2) != Int(0)) {
+ xn *= xn; // Square xn for each binary power
+
+ const bool has_binary_power = (Int(p2 % Int(2)) != Int(0));
+ if (has_binary_power)
+ result *= xn;
+ }
+
+ return result;
+ }
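+
+ // Illustrative sketch of the binary-exponentiation scheme used above
+ // (not part of the original header): for p = 13 = 0b1101 the loop squares
+ // xn once per bit of p and multiplies it into the result whenever the
+ // corresponding bit is set, so
+ //
+ //   pow(x, 13) == x * x^4 * x^8
+ //
+ // which needs only O(log p) multiplications instead of p-1.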
+
+
+ } // end namespace Impl
+
+ template <>
+ struct IsNumber<Impl::Float128>
+ : public std::true_type {};
+
+} // end namespace Dune
+
+namespace std
+{
+#ifndef NO_STD_NUMERIC_LIMITS_SPECIALIZATION
+ template <>
+ class numeric_limits<Dune::Impl::Float128>
+ {
+ using Float128 = Dune::Impl::Float128;
+ using float128_t = Dune::Impl::float128_t;
+
+ public:
+ static constexpr bool is_specialized = true;
+ static constexpr Float128 min() noexcept { return FLT128_MIN; }
+ static constexpr Float128 max() noexcept { return FLT128_MAX; }
+ static constexpr Float128 lowest() noexcept { return -FLT128_MAX; }
+ static constexpr int digits = FLT128_MANT_DIG;
+ static constexpr int digits10 = 33;
+ static constexpr int max_digits10 = 36;
+ static constexpr bool is_signed = true;
+ static constexpr bool is_integer = false;
+ static constexpr bool is_exact = false;
+ static constexpr int radix = 2;
+ static constexpr Float128 epsilon() noexcept { return FLT128_EPSILON; }
+ static constexpr Float128 round_error() noexcept { return float128_t{0.5}; }
+ static constexpr int min_exponent = FLT128_MIN_EXP;
+ static constexpr int min_exponent10 = FLT128_MIN_10_EXP;
+ static constexpr int max_exponent = FLT128_MAX_EXP;
+ static constexpr int max_exponent10 = FLT128_MAX_10_EXP;
+ static constexpr bool has_infinity = true;
+ static constexpr bool has_quiet_NaN = true;
+ static constexpr bool has_signaling_NaN = false;
+ static constexpr float_denorm_style has_denorm = denorm_present;
+ static constexpr bool has_denorm_loss = false;
+ static constexpr Float128 infinity() noexcept { return float128_t{1}/float128_t{0}; }
+ static Float128 quiet_NaN() noexcept { return nanq(""); }
+ static constexpr Float128 signaling_NaN() noexcept { return float128_t{}; }
+ static constexpr Float128 denorm_min() noexcept { return FLT128_DENORM_MIN; }
+ static constexpr bool is_iec559 = true;
+ static constexpr bool is_bounded = true;
+ static constexpr bool is_modulo = false;
+ static constexpr bool traps = false;
+ static constexpr bool tinyness_before = false;
+ static constexpr float_round_style round_style = round_to_nearest;
+ };
+#endif
+} // end namespace std
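+
+// Illustrative sketch (not part of the original header): with the
+// specialization above, generic code can query the traits of Float128 just
+// like those of the built-in floating-point types, e.g.
+//
+//   using F = Dune::Impl::Float128;
+//   static_assert(std::numeric_limits<F>::is_specialized, "");
+//   F eps = std::numeric_limits<F>::epsilon();   // FLT128_EPSILON
+//   int d  = std::numeric_limits<F>::digits10;   // 33 decimal digits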
+
+#endif // HAVE_QUADMATH
+#endif // DUNE_QUADMATH_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_RANGE_UTILITIES_HH
+#define DUNE_COMMON_RANGE_UTILITIES_HH
+
+#include <dune/common/typetraits.hh>
+#include <algorithm>
+#include <utility>
+#include <type_traits>
+#include <bitset>
+
+/**
+ * \file
+ *
+ * \brief Utilities for reduction like operations on ranges
+ * \author Christian Engwer
+ */
+
+namespace Dune
+{
+
+ /**
+ * @addtogroup RangeUtilities
+ * @{
+ */
+
+ /**
+ \brief compute the maximum value over a range
+
+ overloads for scalar values and ranges exist
+ */
+ template <typename T,
+ typename std::enable_if<IsIterable<T>::value, int>::type = 0>
+ typename T::value_type
+ max_value(const T & v) {
+ using std::max_element;
+ return *max_element(v.begin(), v.end());
+ }
+
+ template <typename T,
+ typename std::enable_if<!IsIterable<T>::value, int>::type = 0>
+ const T & max_value(const T & v) { return v; }
+
+ /**
+ \brief compute the minimum value over a range
+
+ overloads for scalar values and ranges exist
+ */
+ template <typename T,
+ typename std::enable_if<IsIterable<T>::value, int>::type = 0>
+ typename T::value_type
+ min_value(const T & v) {
+ using std::min_element;
+ return *min_element(v.begin(), v.end());
+ }
+
+ template <typename T,
+ typename std::enable_if<!IsIterable<T>::value, int>::type = 0>
+ const T & min_value(const T & v) { return v; }
+
+ /**
+ \brief similar to std::bitset<N>::any(): returns true if any entry is true
+
+ overloads for scalar values, ranges, and std::bitset<N> exist
+ */
+ template <typename T,
+ typename std::enable_if<IsIterable<T>::value, int>::type = 0>
+ bool any_true(const T & v) {
+ bool b = false;
+ for (const auto & e : v)
+ b = b or bool(e);
+ return b;
+ }
+
+ template <typename T,
+ typename std::enable_if<!IsIterable<T>::value, int>::type = 0>
+ bool any_true(const T & v) { return v; }
+
+ template<std::size_t N>
+ bool any_true(const std::bitset<N> & b)
+ {
+ return b.any();
+ }
+
+ /**
+ \brief similar to std::bitset<N>::all(): returns true if all entries are true
+
+ overloads for scalar values, ranges, and std::bitset<N> exist
+ */
+ template <typename T,
+ typename std::enable_if<IsIterable<T>::value, int>::type = 0>
+ bool all_true(const T & v) {
+ bool b = true;
+ for (const auto & e : v)
+ b = b and bool(e);
+ return b;
+ }
+
+ template <typename T,
+ typename std::enable_if<!IsIterable<T>::value, int>::type = 0>
+ bool all_true(const T & v) { return v; }
+
+ template<std::size_t N>
+ bool all_true(const std::bitset<N> & b)
+ {
+ return b.all();
+ }
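+
+ // A minimal usage sketch (illustrative only): the overloads above make
+ // reduction-like queries work uniformly for scalars and ranges, e.g.
+ //
+ //   std::vector<int> v{1, 4, 2};
+ //   auto m = Dune::max_value(v);     // 4
+ //   bool a = Dune::any_true(v);      // true, at least one entry is nonzero
+ //   bool b = Dune::all_true(v);      // true, all entries are nonzero
+ //   bool s = Dune::all_true(0);      // false, scalar overload
+ //
+ //   std::bitset<3> bits("101");
+ //   bool c = Dune::all_true(bits);   // false, one bit is unset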
+
+
+
+ namespace Impl
+ {
+
+ template <class T>
+ class IntegralRangeIterator
+ {
+ public:
+ typedef std::random_access_iterator_tag iterator_category;
+ typedef T value_type;
+ typedef std::make_signed_t<T> difference_type;
+ typedef const T *pointer;
+ typedef T reference;
+
+ constexpr IntegralRangeIterator() noexcept : value_(0) {}
+ constexpr explicit IntegralRangeIterator(value_type value) noexcept : value_(value) {}
+
+ pointer operator->() const noexcept { return &value_; }
+ constexpr reference operator*() const noexcept { return value_; }
+
+ constexpr reference operator[]( difference_type n ) const noexcept { return (value_ + n); }
+
+ constexpr bool operator==(const IntegralRangeIterator & other) const noexcept { return (value_ == other.value_); }
+ constexpr bool operator!=(const IntegralRangeIterator & other) const noexcept { return (value_ != other.value_); }
+
+ constexpr bool operator<(const IntegralRangeIterator & other) const noexcept { return (value_ < other.value_); }
+ constexpr bool operator<=(const IntegralRangeIterator & other) const noexcept { return (value_ <= other.value_); }
+ constexpr bool operator>(const IntegralRangeIterator & other) const noexcept { return (value_ > other.value_); }
+ constexpr bool operator>=(const IntegralRangeIterator & other) const noexcept { return (value_ >= other.value_); }
+
+ IntegralRangeIterator& operator++() noexcept { ++value_; return *this; }
+ IntegralRangeIterator operator++(int) noexcept { IntegralRangeIterator copy( *this ); ++(*this); return copy; }
+
+ IntegralRangeIterator& operator--() noexcept { --value_; return *this; }
+ IntegralRangeIterator operator--(int) noexcept { IntegralRangeIterator copy( *this ); --(*this); return copy; }
+
+ IntegralRangeIterator& operator+=(difference_type n) noexcept { value_ += n; return *this; }
+ IntegralRangeIterator& operator-=(difference_type n) noexcept { value_ -= n; return *this; }
+
+ friend constexpr IntegralRangeIterator operator+(const IntegralRangeIterator &a, difference_type n) noexcept { return IntegralRangeIterator(a.value_ + n); }
+ friend constexpr IntegralRangeIterator operator+(difference_type n, const IntegralRangeIterator &a) noexcept { return IntegralRangeIterator(a.value_ + n); }
+ friend constexpr IntegralRangeIterator operator-(const IntegralRangeIterator &a, difference_type n) noexcept { return IntegralRangeIterator(a.value_ - n); }
+
+ constexpr difference_type operator-(const IntegralRangeIterator &other) const noexcept { return (static_cast<difference_type>(value_) - static_cast<difference_type>(other.value_)); }
+
+ private:
+ value_type value_;
+ };
+
+ } // namespace Impl
+
+
+
+ /**
+ * \brief dynamic integer range for use in range-based for loops
+ *
+ * \note This range can also be used in Hybrid::forEach, resulting in a dynamic
+ * for loop over the contained integers.
+ *
+ * \tparam T type of integers contained in the range
+ **/
+ template <class T>
+ class IntegralRange
+ {
+ public:
+ /** \brief type of integers contained in the range **/
+ typedef T value_type;
+ /** \brief type of iterator **/
+ typedef Impl::IntegralRangeIterator<T> iterator;
+ /** \brief unsigned integer type corresponding to value_type **/
+ typedef std::make_unsigned_t<T> size_type;
+
+ /** \brief construct integer range [from, to) **/
+ constexpr IntegralRange(value_type from, value_type to) noexcept : from_(from), to_(to) {}
+ /** \brief construct integer range [0, to) **/
+ constexpr explicit IntegralRange(value_type to) noexcept : from_(0), to_(to) {}
+ /** \brief construct integer range std::pair **/
+ constexpr IntegralRange(std::pair<value_type, value_type> range) noexcept : from_(range.first), to_(range.second) {}
+
+ /** \brief obtain a random-access iterator to the first element **/
+ constexpr iterator begin() const noexcept { return iterator(from_); }
+ /** \brief obtain a random-access iterator past the last element **/
+ constexpr iterator end() const noexcept { return iterator(to_); }
+
+ /** \brief access specified element **/
+ constexpr value_type operator[](const value_type &i) const noexcept { return (from_ + i); }
+
+ /** \brief check whether the range is empty **/
+ constexpr bool empty() const noexcept { return (from_ == to_); }
+ /** \brief obtain number of elements in the range **/
+ constexpr size_type size() const noexcept { return (static_cast<size_type>(to_) - static_cast<size_type>(from_)); }
+
+ private:
+ value_type from_, to_;
+ };
+
+
+ /**
+ * \brief static integer range for use in range-based for loops
+ *
+ * This is a compile-time static variant of the IntegralRange. Apart from
+ * returning all range information statically, it casts into the corresponding
+ * std::integer_sequence.
+ *
+ * \note This range can also be used in Hybrid::forEach, resulting in a static
+ * for loop over the contained integers like a std::integer_sequence.
+ *
+ * \tparam T type of integers contained in the range
+ * \tparam to first element not contained in the range
+ * \tparam from first element contained in the range, defaults to 0
+ **/
+ template <class T, T to, T from = 0>
+ class StaticIntegralRange
+ {
+ template <T ofs, T... i>
+ static std::integer_sequence<T, (i+ofs)...> shift_integer_sequence(std::integer_sequence<T, i...>);
+
+ public:
+ /** \brief type of integers contained in the range **/
+ typedef T value_type;
+ /** \brief type of iterator **/
+ typedef Impl::IntegralRangeIterator<T> iterator;
+ /** \brief unsigned integer type corresponding to value_type **/
+ typedef std::make_unsigned_t<T> size_type;
+
+ /** \brief type of corresponding std::integer_sequence **/
+ typedef decltype(shift_integer_sequence<from>(std::make_integer_sequence<T, to-from>())) integer_sequence;
+
+ /** \brief default constructor **/
+ constexpr StaticIntegralRange() noexcept = default;
+
+ /** \brief cast into dynamic IntegralRange **/
+ constexpr operator IntegralRange<T>() const noexcept { return {from, to}; }
+ /** \brief cast into corresponding std::integer_sequence **/
+ constexpr operator integer_sequence() const noexcept { return {}; }
+
+ /** \brief obtain a random-access iterator to the first element **/
+ static constexpr iterator begin() noexcept { return iterator(from); }
+ /** \brief obtain a random-access iterator past the last element **/
+ static constexpr iterator end() noexcept { return iterator(to); }
+
+ /** \brief access specified element (static version) **/
+ template <class U, U i>
+ constexpr auto operator[](const std::integral_constant<U, i> &) const noexcept
+ -> std::integral_constant<value_type, from + static_cast<value_type>(i)>
+ {
+ return {};
+ }
+
+ /** \brief access specified element (dynamic version) **/
+ constexpr value_type operator[](const size_type &i) const noexcept { return (from + static_cast<value_type>(i)); }
+
+ /** \brief check whether the range is empty **/
+ static constexpr std::integral_constant<bool, from == to> empty() noexcept { return {}; }
+ /** \brief obtain number of elements in the range **/
+ static constexpr std::integral_constant<size_type, static_cast<size_type>(to) - static_cast<size_type>(from) > size() noexcept { return {}; }
+ };
+
+ /**
+ \brief free-standing function for setting up a range-based for loop
+ over an integer range
+ for (auto i: range(0,10)) // 0,1,2,3,4,5,6,7,8,9
+ or
+ for (auto i: range(-10,10)) // -10,-9,..,8,9
+ or
+ for (auto i: range(10)) // 0,1,2,3,4,5,6,7,8,9
+ */
+ template<class T, class U,
+ std::enable_if_t<std::is_same<std::decay_t<T>, std::decay_t<U>>::value, int> = 0,
+ std::enable_if_t<std::is_integral<std::decay_t<T>>::value, int> = 0>
+ inline static IntegralRange<std::decay_t<T>> range(T &&from, U &&to) noexcept
+ {
+ return IntegralRange<std::decay_t<T>>(std::forward<T>(from), std::forward<U>(to));
+ }
+
+ template<class T, std::enable_if_t<std::is_integral<std::decay_t<T>>::value, int> = 0>
+ inline static IntegralRange<std::decay_t<T>> range(T &&to) noexcept
+ {
+ return IntegralRange<std::decay_t<T>>(std::forward<T>(to));
+ }
+
+ template<class T, std::enable_if_t<std::is_enum<std::decay_t<T>>::value, int> = 0>
+ inline static IntegralRange<std::underlying_type_t<std::decay_t<T>>> range(T &&to) noexcept
+ {
+ return IntegralRange<std::underlying_type_t<std::decay_t<T>>>(std::forward<T>(to));
+ }
+
+ template<class T, T from, T to>
+ inline static StaticIntegralRange<T, to, from> range(std::integral_constant<T, from>, std::integral_constant<T, to>) noexcept
+ {
+ return {};
+ }
+
+ template<class T, T to>
+ inline static StaticIntegralRange<T, to> range(std::integral_constant<T, to>) noexcept
+ {
+ return {};
+ }
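+
+ // A minimal usage sketch (illustrative only): the free function range()
+ // selects a dynamic or static range depending on its arguments, e.g.
+ //
+ //   for (auto i : range(3))          // 0,1,2 (IntegralRange<int>)
+ //     doSomething(i);
+ //
+ //   // compile-time bounds yield a StaticIntegralRange, which can be
+ //   // converted to the corresponding std::integer_sequence
+ //   auto r = range(std::integral_constant<int,4>{});
+ //   static_assert(decltype(r)::size() == 4, "");
+ //
+ // doSomething is a placeholder for user code.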
+
+
+
+ /**
+ * \brief Tag to enable value based transformations in TransformedRangeView
+ */
+ struct ValueTransformationTag {};
+
+ /**
+ * \brief Tag to enable iterator based transformations in TransformedRangeView
+ */
+ struct IteratorTransformationTag {};
+
+ namespace Impl
+ {
+
+ // Helper class to mimic a pointer for proxy objects.
+ // This is needed to implement operator-> on an iterator
+ // using proxy-values. It stores the proxy value but
+ // provides operator-> like a pointer.
+ template<class ProxyType>
+ class PointerProxy
+ {
+ public:
+ PointerProxy(ProxyType&& p) : p_(p)
+ {}
+
+ ProxyType* operator->()
+ {
+ return &p_;
+ }
+
+ ProxyType p_;
+ };
+
+ // An iterator transforming a wrapped iterator using
+ // an unary function. It inherits the iterator-category
+ // of the underlying iterator.
+ template <class I, class F, class TransformationType, class C = typename std::iterator_traits<I>::iterator_category>
+ class TransformedRangeIterator;
+
+ template <class I, class F, class TransformationType>
+ class TransformedRangeIterator<I,F,TransformationType,std::forward_iterator_tag>
+ {
+ protected:
+
+ static decltype(auto) transform(const F& f, const I& it) {
+ if constexpr (std::is_same_v<TransformationType,IteratorTransformationTag>)
+ return f(it);
+ else
+ return f(*it);
+ }
+
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using reference = decltype(transform(std::declval<F>(), std::declval<I>()));
+ using value_type = std::decay_t<reference>;
+ using pointer = PointerProxy<value_type>;
+
+ // If we later want to allow standalone TransformedRangeIterators,
+ // we could customize the FunctionPointer to be a default-constructible,
+ // copy-assignable type storing a function but acting like a pointer
+ // to function.
+ using FunctionPointer = const F*;
+
+ constexpr TransformedRangeIterator(const I& it, FunctionPointer f) noexcept :
+ it_(it),
+ f_(f)
+ {}
+
+ // Explicitly initialize members. Using a plain
+ //
+ // constexpr TransformedRangeIterator() noexcept {}
+ //
+ // would default-initialize the members while
+ //
+ // constexpr TransformedRangeIterator() noexcept : it_(), f_() {}
+ //
+ // leads to value-initialization. This is a case where
+ // both are really different. If it_ is a raw pointer (i.e. POD)
+ // then default-initialization leaves it uninitialized while
+ // value-initialization zero-initializes it.
+ constexpr TransformedRangeIterator() noexcept :
+ it_(),
+ f_()
+ {}
+
+ // Dereferencing returns a value created by the function
+ constexpr reference operator*() const noexcept {
+ return transform(*f_, it_);
+ }
+
+ // Dereferencing returns a value created by the function
+ pointer operator->() const noexcept {
+ return transform(*f_, it_);
+ }
+
+ constexpr TransformedRangeIterator& operator=(const TransformedRangeIterator& other) = default;
+
+ constexpr bool operator==(const TransformedRangeIterator& other) const noexcept {
+ return (it_ == other.it_);
+ }
+
+ constexpr bool operator!=(const TransformedRangeIterator& other) const noexcept {
+ return (it_ != other.it_);
+ }
+
+ TransformedRangeIterator& operator++() noexcept {
+ ++it_;
+ return *this;
+ }
+
+ TransformedRangeIterator operator++(int) noexcept {
+ TransformedRangeIterator copy(*this);
+ ++(*this);
+ return copy;
+ }
+
+ protected:
+ I it_;
+ FunctionPointer f_;
+ };
+
+
+
+ template <class I, class F, class T>
+ class TransformedRangeIterator<I,F,T,std::bidirectional_iterator_tag> :
+ public TransformedRangeIterator<I,F,T,std::forward_iterator_tag>
+ {
+ protected:
+ using Base = TransformedRangeIterator<I,F,T,std::forward_iterator_tag>;
+ using Base::it_;
+ using Base::f_;
+ public:
+ using iterator_category = std::bidirectional_iterator_tag;
+ using reference = typename Base::reference;
+ using value_type = typename Base::value_type;
+ using pointer = typename Base::pointer;
+
+ using FunctionPointer = typename Base::FunctionPointer;
+
+ using Base::Base;
+
+ // Member functions of the forward_iterator that need
+ // to be redefined because the base class methods return a
+ // forward_iterator.
+ constexpr TransformedRangeIterator& operator=(const TransformedRangeIterator& other) = default;
+
+ TransformedRangeIterator& operator++() noexcept {
+ ++it_;
+ return *this;
+ }
+
+ TransformedRangeIterator operator++(int) noexcept {
+ TransformedRangeIterator copy(*this);
+ ++(*this);
+ return copy;
+ }
+
+ // Additional member functions of bidirectional_iterator
+ TransformedRangeIterator& operator--() noexcept {
+ --(this->it_);
+ return *this;
+ }
+
+ TransformedRangeIterator operator--(int) noexcept {
+ TransformedRangeIterator copy(*this);
+ --(*this);
+ return copy;
+ }
+ };
+
+
+
+ template <class I, class F, class T>
+ class TransformedRangeIterator<I,F,T,std::random_access_iterator_tag> :
+ public TransformedRangeIterator<I,F,T,std::bidirectional_iterator_tag>
+ {
+ protected:
+ using Base = TransformedRangeIterator<I,F,T,std::bidirectional_iterator_tag>;
+ using Base::it_;
+ using Base::f_;
+ public:
+ using iterator_category = std::random_access_iterator_tag;
+ using reference = typename Base::reference;
+ using value_type = typename Base::value_type;
+ using pointer = typename Base::pointer;
+ using difference_type = typename std::iterator_traits<I>::difference_type;
+
+ using FunctionPointer = typename Base::FunctionPointer;
+
+ using Base::Base;
+
+ // Member functions of the forward_iterator that need
+ // to be redefined because the base class methods return a
+ // forward_iterator.
+ constexpr TransformedRangeIterator& operator=(const TransformedRangeIterator& other) = default;
+
+ TransformedRangeIterator& operator++() noexcept {
+ ++it_;
+ return *this;
+ }
+
+ TransformedRangeIterator operator++(int) noexcept {
+ TransformedRangeIterator copy(*this);
+ ++(*this);
+ return copy;
+ }
+
+ // Member functions of the bidirectional_iterator that need
+ // to be redefined because the base class methods return a
+ // bidirectional_iterator.
+ TransformedRangeIterator& operator--() noexcept {
+ --(this->it_);
+ return *this;
+ }
+
+ TransformedRangeIterator operator--(int) noexcept {
+ TransformedRangeIterator copy(*this);
+ --(*this);
+ return copy;
+ }
+
+ // Additional member functions of random_access_iterator
+ TransformedRangeIterator& operator+=(difference_type n) noexcept {
+ it_ += n;
+ return *this;
+ }
+
+ TransformedRangeIterator& operator-=(difference_type n) noexcept {
+ it_ -= n;
+ return *this;
+ }
+
+ bool operator<(const TransformedRangeIterator& other) noexcept {
+ return it_<other.it_;
+ }
+
+ bool operator<=(const TransformedRangeIterator& other) noexcept {
+ return it_<=other.it_;
+ }
+
+ bool operator>(const TransformedRangeIterator& other) noexcept {
+ return it_>other.it_;
+ }
+
+ bool operator>=(const TransformedRangeIterator& other) noexcept {
+ return it_>=other.it_;
+ }
+
+ reference operator[](difference_type n) noexcept {
+ return Base::transform(*f_, it_+n);
+ }
+
+ friend
+ TransformedRangeIterator operator+(const TransformedRangeIterator& it, difference_type n) noexcept {
+ return TransformedRangeIterator(it.it_+n, it.f_);
+ }
+
+ friend
+ TransformedRangeIterator operator+(difference_type n, const TransformedRangeIterator& it) noexcept {
+ return TransformedRangeIterator(n+it.it_, it.f_);
+ }
+
+ friend
+ TransformedRangeIterator operator-(const TransformedRangeIterator& it, difference_type n) noexcept {
+ return TransformedRangeIterator(it.it_-n, it.f_);
+ }
+
+ friend
+ difference_type operator-(const TransformedRangeIterator& first, const TransformedRangeIterator& second) noexcept {
+ return first.it_-second.it_;
+ }
+ };
+
+
+ } // namespace Impl
+
+
+
+ /**
+ * \brief A range transforming the values of another range on-the-fly
+ *
+ * This behaves like a range providing `begin()` and `end()`.
+ * The iterators over this range internally iterate over
+ * the wrapped range. When dereferencing the iterator,
+ * the value is transformed on-the-fly using a given
+ * transformation function leaving the underlying range
+ * unchanged.
+ *
+ * The transformation may either return temporary values
+ * or l-value references. In the former case the range behaves
+ * like a proxy-container. In the latter case it forwards these
+ * references allowing, e.g., to sort a subset of some container
+ * by applying a transformation to an index-range for those values.
+ *
+ * The iterators of the TransformedRangeView have the same
+ * iterator_category as the ones of the wrapped container.
+ *
+ * If range is given as r-value, then the returned TransformedRangeView
+ * stores it by value, if range is given as (const) l-value, then the
+ * TransformedRangeView stores it by (const) reference.
+ *
+ * If R is a value type, then the TransformedRangeView stores the wrapped range by value,
+ * if R is a reference type, then the TransformedRangeView stores the wrapped range by reference.
+ *
+ * \tparam R Underlying range.
+ * \tparam F Unary function used to transform the values in the underlying range.
+ * \tparam T Class for describing how to apply the transformation
+ *
+ * T has to be either ValueTransformationTag (default) or IteratorTransformationTag.
+ * In the former case, the transformation is applied to the values
+ * obtained by dereferencing the wrapped iterator. In the latter case
+ * it is applied to the iterator directly, allowing to access non-standard
+ * functions of the iterator.
+ **/
+ template <class R, class F, class T=ValueTransformationTag>
+ class TransformedRangeView
+ {
+ using RawConstIterator = std::decay_t<decltype(std::declval<const R>().begin())>;
+ using RawIterator = std::decay_t<decltype(std::declval<R>().begin())>;
+
+ public:
+
+ /**
+ * \brief Const iterator type
+ *
+ * This inherits the iterator_category of the iterators
+ * of the underlying range.
+ */
+ using const_iterator = Impl::TransformedRangeIterator<RawConstIterator, F, T>;
+
+ /**
+ * \brief Iterator type
+ *
+ * This inherits the iterator_category of the iterators
+ * of the underlying range.
+ */
+ using iterator = Impl::TransformedRangeIterator<RawIterator, F, T>;
+
+ /**
+ * \brief Export type of the wrapped untransformed range.
+ *
+ * Notice that this will always be the raw type with references
+ * removed, even if a reference is stored.
+ */
+ using RawRange = std::remove_reference_t<R>;
+
+ /**
+ * \brief Construct from range and function
+ */
+ template<class RR>
+ constexpr TransformedRangeView(RR&& rawRange, const F& f) noexcept :
+ rawRange_(std::forward<RR>(rawRange)),
+ f_(f)
+ {
+ static_assert(std::is_same_v<T, ValueTransformationTag> or std::is_same_v<T, IteratorTransformationTag>,
+ "The TransformationType passed to TransformedRangeView has to be either ValueTransformationTag or IteratorTransformationTag.");
+ }
+
+ /**
+ * \brief Obtain an iterator to the first element
+ *
+ * The life time of the returned iterator is bound to
+ * the life time of the range since it only contains a
+ * pointer to the transformation function stored
+ * in the range.
+ */
+ constexpr const_iterator begin() const noexcept {
+ return const_iterator(rawRange_.begin(), &f_);
+ }
+
+ constexpr iterator begin() noexcept {
+ return iterator(rawRange_.begin(), &f_);
+ }
+
+ /**
+ * \brief Obtain an iterator past the last element
+ *
+ * The life time of the returned iterator is bound to
+ * the life time of the range since it only contains a
+ * pointer to the transformation function stored
+ * in the range.
+ */
+ constexpr const_iterator end() const noexcept {
+ return const_iterator(rawRange_.end(), &f_);
+ }
+
+ constexpr iterator end() noexcept {
+ return iterator(rawRange_.end(), &f_);
+ }
+
+ /**
+ * \brief Obtain the size of the range
+ *
+ * This is only available if the underlying range
+ * provides a size() method. In this case size()
+ * just forwards to the underlying range's size() method.
+ *
+ * Attention: Don't select the template parameters explicitly.
+ * They are only used to implement SFINAE.
+ */
+ template<class Dummy=R,
+ class = std::void_t<decltype(std::declval<Dummy>().size())>>
+ auto size() const
+ {
+ return rawRange_.size();
+ }
+
+ /**
+ * \brief Export the wrapped untransformed range.
+ */
+ const RawRange& rawRange() const
+ {
+ return rawRange_;
+ }
+
+ /**
+ * \brief Export the wrapped untransformed range.
+ */
+ RawRange& rawRange()
+ {
+ return rawRange_;
+ }
+
+ private:
+ R rawRange_;
+ F f_;
+ };
+
+ /**
+ * \brief Create a TransformedRangeView
+ *
+ * \param range The range to transform
+ * \param f Unary function that should be applied to the entries of the range.
+ *
+ * This behaves like a range providing `begin()` and `end()`.
+ * The iterators over this range internally iterate over
+ * the wrapped range. When dereferencing the iterator,
+ * the wrapped iterator is dereferenced,
+ * the given transformation function is applied on-the-fly,
+ * and the result is returned.
+ * I.e, if \code it \endcode is the wrapped iterator
+ * and \code f \endcode is the transformation function,
+ * then the result of \code f(*it) \endcode is returned
+ *
+ * The transformation may either return temporary values
+ * or l-value references. In the former case the range behaves
+ * like a proxy-container. In the latter case it forwards these
+ * references allowing, e.g., to sort a subset of some container
+ * by applying a transformation to an index-range for those values.
+ *
+ * The iterators of the TransformedRangeView have the same
+ * iterator_category as the ones of the wrapped container.
+ *
+ * If range is an r-value, then the TransformedRangeView stores it by value,
+ * if range is an l-value, then the TransformedRangeView stores it by reference.
+ **/
+ template <class R, class F>
+ auto transformedRangeView(R&& range, const F& f)
+ {
+ return TransformedRangeView<R, F, ValueTransformationTag>(std::forward<R>(range), f);
+ }
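+
+ // A minimal usage sketch (illustrative only, names are placeholders):
+ //
+ //   std::vector<int> v{1, 2, 3};
+ //   for (auto x : transformedRangeView(v, [](int i) { return i*i; }))
+ //     std::cout << x << " ";        // prints 1 4 9
+ //
+ // The vector itself is left unchanged; the squares are computed
+ // on-the-fly when the iterators are dereferenced.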
+
+ /**
+ * \brief Create a TransformedRangeView using an iterator transformation
+ *
+ * \param range The range to transform
+ * \param f Unary function that should be applied to the entries of the range.
+ *
+ * This behaves like a range providing `begin()` and `end()`.
+ * The iterators over this range internally iterate over
+ * the wrapped range. When dereferencing the iterator,
+ * the given transformation function is applied to the wrapped
+ * iterator on-the-fly and the result is returned.
+ * I.e, if \code it \endcode is the wrapped iterator
+ * and \code f \endcode is the transformation function,
+ * then the result of \code f(it) \endcode is returned.
+ *
+ * The transformation may either return temporary values
+ * or l-value references. In the former case the range behaves
+ * like a proxy-container. In the latter case it forwards these
+ * references allowing, e.g., to sort a subset of some container
+ * by applying a transformation to an index-range for those values.
+ *
+ * The iterators of the TransformedRangeView have the same
+ * iterator_category as the ones of the wrapped container.
+ *
+ * If range is an r-value, then the TransformedRangeView stores it by value,
+ * if range is an l-value, then the TransformedRangeView stores it by reference.
+ **/
+ template <class R, class F>
+ auto iteratorTransformedRangeView(R&& range, const F& f)
+ {
+ return TransformedRangeView<R, F, IteratorTransformationTag>(std::forward<R>(range), f);
+ }
+
+
+ /**
+ * \brief Allow structured-binding for-loops for sparse iterators
+ *
+ * Given a sparse range `R` whose iterators `it`
+ * provide (additionally to dereferencing) a method
+ * `it->index()` for accessing the index of the current entry in the
+ * sparse range, this allows to write code like
+ * \code
+ * for(auto&& [A_i, i] : sparseRange(R))
+ * doSomethingWithValueAndIndex(A_i, i);
+ * \endcode
+ */
+ template<class Range>
+ auto sparseRange(Range&& range) {
+ return Dune::iteratorTransformedRangeView(std::forward<Range>(range), [](auto&& it) {
+ return std::tuple<decltype(*it), decltype(it.index())>(*it, it.index());
+ });
+ }
+
+ /**
+ * @}
+ */
+
+}
+
+#endif // DUNE_COMMON_RANGE_UTILITIES_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_RESERVEDVECTOR_HH
+#define DUNE_COMMON_RESERVEDVECTOR_HH
+
+/** \file
+ * \brief An STL-compliant random-access container which stores everything on the stack
+ */
+
+#include <algorithm>
+#include <iostream>
+#include <cstddef>
+#include <dune/common/genericiterator.hh>
+#include <initializer_list>
+
+#include <dune/common/hash.hh>
+
+#ifdef CHECK_RESERVEDVECTOR
+#define CHECKSIZE(X) assert(X)
+#else
+#define CHECKSIZE(X) {}
+#endif
+
+namespace Dune
+{
+ /**
+ \brief A Vector class with statically reserved memory.
+
+ ReservedVector is something between std::array and std::vector.
+ It is a dynamically sized vector which can be extended and shrunk
+ using methods like push_back and pop_back, but reserved memory is
+ statically predefined.
+
+ This implies that the vector cannot grow bigger than the predefined
+ maximum size.
+
+ \tparam T The data type ReservedVector stores.
+ \tparam n The maximum number of objects the ReservedVector can store.
+
+ */
+ template<class T, int n>
+ class ReservedVector
+ {
+ public:
+
+ /** @{ Typedefs */
+
+ //! The type of object, T, stored in the vector.
+ typedef T value_type;
+ //! Pointer to T.
+ typedef T* pointer;
+ //! Reference to T
+ typedef T& reference;
+ //! Const reference to T
+ typedef const T& const_reference;
+ //! An unsigned integral type.
+ typedef size_t size_type;
+ //! A signed integral type.
+ typedef std::ptrdiff_t difference_type;
+ //! Iterator used to iterate through a vector.
+ typedef Dune::GenericIterator<ReservedVector, value_type> iterator;
+ //! Const iterator used to iterate through a vector.
+ typedef Dune::GenericIterator<const ReservedVector, const value_type> const_iterator;
+
+ /** @} */
+
+ /** @{ Constructors */
+
+ //! Constructor
+ ReservedVector() = default;
+
+ ReservedVector(std::initializer_list<T> const &l)
+ {
+ assert(l.size() <= n);// Actually, this is not needed any more!
+ sz = l.size();
+ std::copy_n(l.begin(), sz, data);
+ }
+
+ /** @} */
+
+ bool operator == (const ReservedVector & other) const
+ {
+ bool eq = (sz == other.sz);
+ for (size_type i=0; i<sz && eq; ++i)
+ eq = eq && (data[i] == other.data[i]);
+ return eq;
+ }
+
+ /** @{ Data access operations */
+
+ //! Erases all elements.
+ void clear()
+ {
+ sz = 0;
+ }
+
+ //! Specifies a new size for the vector.
+ void resize(size_t s)
+ {
+ CHECKSIZE(s<=n);
+ sz = s;
+ }
+
+ //! Appends an element to the end of a vector, up to the maximum size n, O(1) time.
+ void push_back(const T& t)
+ {
+ CHECKSIZE(sz<n);
+ data[sz++] = t;
+ }
+
+ //! Erases the last element of the vector, O(1) time.
+ void pop_back()
+ {
+ if (! empty()) sz--;
+ }
+
+ //! Returns an iterator pointing to the beginning of the vector.
+ iterator begin(){
+ return iterator(*this, 0);
+ }
+
+ //! Returns a const_iterator pointing to the beginning of the vector.
+ const_iterator begin() const {
+ return const_iterator(*this, 0);
+ }
+
+ //! Returns an iterator pointing to the end of the vector.
+ iterator end(){
+ return iterator(*this, sz);
+ }
+
+ //! Returns a const_iterator pointing to the end of the vector.
+ const_iterator end() const {
+ return const_iterator(*this, sz);
+ }
+
+ //! Returns reference to the i'th element.
+ reference operator[] (size_type i)
+ {
+ CHECKSIZE(sz>i);
+ return data[i];
+ }
+
+ //! Returns a const reference to the i'th element.
+ const_reference operator[] (size_type i) const
+ {
+ CHECKSIZE(sz>i);
+ return data[i];
+ }
+
+ //! Returns reference to first element of vector.
+ reference front()
+ {
+ CHECKSIZE(sz>0);
+ return data[0];
+ }
+
+ //! Returns const reference to first element of vector.
+ const_reference front() const
+ {
+ CHECKSIZE(sz>0);
+ return data[0];
+ }
+
+ //! Returns reference to last element of vector.
+ reference back()
+ {
+ CHECKSIZE(sz>0);
+ return data[sz-1];
+ }
+
+ //! Returns const reference to last element of vector.
+ const_reference back() const
+ {
+ CHECKSIZE(sz>0);
+ return data[sz-1];
+ }
+
+ /** @} */
+
+ /** @{ Informative Methods */
+
+ //! Returns number of elements in the vector.
+ size_type size () const
+ {
+ return sz;
+ }
+
+ //! Returns true if vector has no elements.
+ bool empty() const
+ {
+ return sz==0;
+ }
+
+ //! Returns current capacity (allocated memory) of the vector.
+ static constexpr size_type capacity()
+ {
+ return n;
+ }
+
+ //! Returns the maximum length of the vector.
+ static constexpr size_type max_size()
+ {
+ return n;
+ }
+
+ /** @} */
+
+ //! Send ReservedVector to an output stream
+ friend std::ostream& operator<< (std::ostream& s, const ReservedVector& v)
+ {
+ for (size_t i=0; i<v.size(); i++)
+ s << v[i] << " ";
+ return s;
+ }
+
+ inline friend std::size_t hash_value(const ReservedVector& v) noexcept
+ {
+ return hash_range(v.data,v.data+v.sz);
+ }
+
+ private:
+ T data[n] = {};
+ size_type sz = 0;
+ };
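+
+ // A minimal usage sketch (illustrative only): ReservedVector behaves like a
+ // std::vector with a fixed capacity and no heap allocation, e.g.
+ //
+ //   Dune::ReservedVector<int, 4> v;
+ //   v.push_back(1);
+ //   v.push_back(2);
+ //   // v.size() == 2, v.capacity() == 4, all entries live on the stack
+ //   v.pop_back();                    // v.size() == 1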
+
+}
+
+DUNE_DEFINE_HASH(DUNE_HASH_TEMPLATE_ARGS(typename T, int n),DUNE_HASH_TYPE(Dune::ReservedVector<T,n>))
+
+#undef CHECKSIZE
+
+#endif // DUNE_COMMON_RESERVEDVECTOR_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_SCALARMATRIXVIEW_HH
+#define DUNE_COMMON_SCALARMATRIXVIEW_HH
+
+#include <cstddef>
+#include <type_traits>
+#include <ostream>
+
+#include <dune/common/boundschecking.hh>
+#include <dune/common/typetraits.hh>
+#include <dune/common/matvectraits.hh>
+#include <dune/common/densematrix.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/scalarvectorview.hh>
+
+
+namespace Dune {
+
+namespace Impl {
+
+ /**
+ @addtogroup DenseMatVec
+ @{
+ */
+
+ /*! \file
+ * \brief Implements a scalar matrix view wrapper around an existing scalar.
+ */
+
+ /** \brief A wrapper making a scalar look like a matrix
+ *
+ * This stores a pointer to a scalar of type K and
+ * provides the interface of a matrix with a single row
+ * and column represented by the data behind the pointer.
+ */
+ template<class K>
+ class ScalarMatrixView :
+ public DenseMatrix<ScalarMatrixView<K>>
+ {
+ ScalarVectorView<K> data_;
+ using Base = DenseMatrix<ScalarMatrixView<K>>;
+
+ template <class>
+ friend class ScalarMatrixView;
+ public:
+
+ //===== type definitions and constants
+
+ //! We are at the leaf of the block recursion
+ enum {
+ //! The number of block levels we contain.
+ //! This is always one for this type.
+ blocklevel = 1
+ };
+
+ using size_type = typename Base::size_type;
+ using row_type = typename Base::row_type;
+ using row_reference = typename Base::row_reference;
+ using const_row_reference = typename Base::const_row_reference;
+
+ //! export size
+ enum {
+ //! \brief The number of rows.
+ //! This is always one for this type.
+ rows = 1,
+ //! \brief The number of columns.
+ //! This is always one for this type.
+ cols = 1
+ };
+
+ //===== constructors
+ /** \brief Default constructor
+ */
+ constexpr ScalarMatrixView ()
+ : data_()
+ {}
+
+ /** \brief Construct from a pointer to a scalar */
+ ScalarMatrixView (K* p) :
+ data_(p)
+ {}
+
+ //! Copy constructor
+ ScalarMatrixView (const ScalarMatrixView &other) :
+ Base(),
+ data_(other.data_)
+ {}
+
+ //! Move constructor
+ ScalarMatrixView (ScalarMatrixView &&other) :
+ Base(),
+ data_( other.data_ )
+ {}
+
+ //! Copy assignment operator
+ ScalarMatrixView& operator= (const ScalarMatrixView& other)
+ {
+ data_ = other.data_;
+ return *this;
+ }
+
+ template<class KK>
+ ScalarMatrixView& operator= (const ScalarMatrixView<KK>& other)
+ {
+ data_ = other.data_;
+ return *this;
+ }
+
+ //! Assignment operator from a scalar
+ template<typename T,
+ std::enable_if_t<std::is_convertible<T, K>::value, int> = 0>
+ inline ScalarMatrixView& operator= (const T& k)
+ {
+ data_ = k;
+ return *this;
+ }
+
+ // make this thing a matrix
+ static constexpr size_type mat_rows() { return 1; }
+ static constexpr size_type mat_cols() { return 1; }
+
+ row_reference mat_access ([[maybe_unused]] size_type i)
+ {
+ DUNE_ASSERT_BOUNDS(i == 0);
+ return data_;
+ }
+
+ const_row_reference mat_access ([[maybe_unused]] size_type i) const
+ {
+ DUNE_ASSERT_BOUNDS(i == 0);
+ return data_;
+ }
+ }; // class ScalarMatrixView
+
+ /** \brief Sends the matrix to an output stream */
+ template<typename K>
+ std::ostream& operator<< (std::ostream& s, const ScalarMatrixView<K>& a)
+ {
+ s << a[0][0];
+ return s;
+ }
+
+ /** \brief Wrap a scalar as a 1-1-matrix */
+ template<class T,
+ std::enable_if_t<IsNumber<T>::value, int> = 0>
+ auto asMatrix(T& t)
+ {
+ return ScalarMatrixView<T>{&t};
+ }
+
+ /** \brief Wrap a const scalar as a const 1-1-matrix */
+ template<class T,
+ std::enable_if_t<IsNumber<T>::value, int> = 0>
+ auto asMatrix(const T& t)
+ {
+ return ScalarMatrixView<const T>{&t};
+ }
+
+ /** \brief Non-scalar types are assumed to be matrices, and simply forwarded */
+ template<class T,
+ std::enable_if_t<not IsNumber<T>::value, int> = 0>
+ T& asMatrix(T& t)
+ {
+ return t;
+ }
+
+ /** \brief Non-scalar types are assumed to be matrices, and simply forwarded */
+ template<class T,
+ std::enable_if_t<not IsNumber<T>::value, int> = 0>
+ const T& asMatrix(const T& t)
+ {
+ return t;
+ }
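+
+ // A minimal usage sketch (illustrative only): asMatrix() lets generic code
+ // treat scalars and matrices uniformly, e.g.
+ //
+ //   double x = 2.0;
+ //   auto m = asMatrix(x);            // 1x1 view, m[0][0] refers to x
+ //   FieldMatrix<double,2,2> A;
+ //   auto& a = asMatrix(A);           // matrices are passed through unchanged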
+
+ /** @} end documentation */
+
+} // end namespace Impl
+
+ template<class K>
+ struct FieldTraits<Impl::ScalarMatrixView<K>> : public FieldTraits<std::remove_const_t<K>> {};
+
+ template<class K>
+ struct DenseMatVecTraits<Impl::ScalarMatrixView<K>>
+ {
+ using derived_type = Impl::ScalarMatrixView<K>;
+ using row_type = Impl::ScalarVectorView<K>;
+ using row_reference = row_type&;
+ using const_row_reference = const row_type&;
+ using value_type = std::remove_const_t<K>;
+ using size_type = std::size_t;
+ };
+
+
+ template<class K>
+ struct AutonomousValueType<Impl::ScalarMatrixView<K>>
+ {
+ using type = FieldMatrix<std::remove_const_t<K>,1,1>;
+ };
+
+
+} // end namespace Dune
+
+#endif // DUNE_COMMON_SCALARMATRIXVIEW_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_SCALARVECTORVIEW_HH
+#define DUNE_COMMON_SCALARVECTORVIEW_HH
+
+#include <cstddef>
+#include <type_traits>
+#include <istream>
+
+#include <dune/common/densevector.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/typetraits.hh>
+#include <dune/common/matvectraits.hh>
+
+namespace Dune {
+
+namespace Impl {
+
+ /** @addtogroup DenseMatVec
+ @{
+ */
+
+ /*! \file
+ * \brief Implements a scalar vector view wrapper around an existing scalar.
+ */
+
+ /** \brief A wrapper making a scalar look like a vector
+ *
+ * This stores a pointer to a scalar of type K and
+ * provides the interface of a vector with a single
+ * entry represented by the data behind the pointer.
+ */
+ template<class K>
+ class ScalarVectorView :
+ public DenseVector<ScalarVectorView<K>>
+ {
+ K* dataP_;
+ using Base = DenseVector<ScalarVectorView<K>>;
+
+ template <class>
+ friend class ScalarVectorView;
+ public:
+
+ //! export size
+ enum {
+ //! The size of this vector.
+ dimension = 1
+ };
+
+ /** \brief The type used for array indices and sizes */
+ using size_type = typename Base::size_type;
+
+ /** \brief The type used for references to the vector entry */
+ using reference = std::decay_t<K>&;
+
+ /** \brief The type used for const references to the vector entry */
+ using const_reference = const K&;
+
+ //===== construction
+
+ /** \brief Default constructor */
+ constexpr ScalarVectorView ()
+ : dataP_(nullptr)
+ {}
+
+ /** \brief Construct from a pointer to a scalar */
+ ScalarVectorView (K* p) :
+ dataP_(p)
+ {}
+
+ //! Copy constructor
+ ScalarVectorView (const ScalarVectorView &other) :
+ Base(),
+ dataP_(other.dataP_)
+ {}
+
+ //! Move constructor
+ ScalarVectorView (ScalarVectorView &&other) :
+ Base(),
+ dataP_( other.dataP_ )
+ {}
+
+ //! Copy assignment operator
+ ScalarVectorView& operator= (const ScalarVectorView& other)
+ {
+ assert(dataP_);
+ assert(other.dataP_);
+ *dataP_ = *(other.dataP_);
+ return *this;
+ }
+
+ template<class KK>
+ ScalarVectorView& operator= (const ScalarVectorView<KK>& other)
+ {
+ assert(dataP_);
+ assert(other.dataP_);
+ *dataP_ = *(other.dataP_);
+ return *this;
+ }
+
+ //! Assignment operator from a scalar
+ template<typename T,
+ std::enable_if_t<std::is_convertible<T, K>::value, int> = 0>
+ inline ScalarVectorView& operator= (const T& k)
+ {
+ *dataP_ = k;
+ return *this;
+ }
+
+ /** \brief Container size -- this is always 1 */
+ static constexpr size_type size ()
+ {
+ return 1;
+ }
+
+ /** \brief Random access operator, actually disregards its argument */
+ K& operator[] ([[maybe_unused]] size_type i)
+ {
+ DUNE_ASSERT_BOUNDS(i == 0);
+ return *dataP_;
+ }
+
+ /** \brief Const random access operator, actually disregards its argument */
+ const K& operator[] ([[maybe_unused]] size_type i) const
+ {
+ DUNE_ASSERT_BOUNDS(i == 0);
+ return *dataP_;
+ }
+ }; // class ScalarVectorView
+
+} // namespace Impl
+
+
+ template< class K>
+ struct DenseMatVecTraits< Impl::ScalarVectorView<K> >
+ {
+ using derived_type = Impl::ScalarVectorView<K>;
+ using value_type = std::remove_const_t<K>;
+ using size_type = std::size_t;
+ };
+
+ template< class K >
+ struct FieldTraits< Impl::ScalarVectorView<K> > : public FieldTraits<std::remove_const_t<K>> {};
+
+ template<class K>
+ struct AutonomousValueType<Impl::ScalarVectorView<K>>
+ {
+ using type = FieldVector<std::remove_const_t<K>,1>;
+ };
+
+namespace Impl {
+
+ /** \brief Read a ScalarVectorView from an input stream
+ * \relates ScalarVectorView
+ *
+ * \note This operator is STL compliant, i.e., the content of v is only
+ * changed if the read operation is successful.
+ *
+ * \param[in] in std::istream to read from
+ * \param[out] v ScalarVectorView to be read
+ *
+ * \returns the input stream (in)
+ */
+ template<class K>
+ inline std::istream &operator>> ( std::istream &in, ScalarVectorView<K> &v )
+ {
+ K w;
+ if(in >> w)
+ v = w;
+ return in;
+ }
+
+
+ /** \brief Wrap a scalar as a 1-vector */
+ template<class T,
+ std::enable_if_t<IsNumber<T>::value, int> = 0>
+ auto asVector(T& t)
+ {
+ return ScalarVectorView<T>{&t};
+ }
+
+ /** \brief Wrap a const scalar as a const 1-vector */
+ template<class T,
+ std::enable_if_t<IsNumber<T>::value, int> = 0>
+ auto asVector(const T& t)
+ {
+ return ScalarVectorView<const T>{&t};
+ }
+
+ /** \brief Non-scalar types are assumed to be arrays, and simply forwarded */
+ template<class T,
+ std::enable_if_t<not IsNumber<T>::value, int> = 0>
+ T& asVector(T& t)
+ {
+ return t;
+ }
+
+ /** \brief Non-scalar types are assumed to be arrays, and simply forwarded */
+ template<class T,
+ std::enable_if_t<not IsNumber<T>::value, int> = 0>
+ const T& asVector(const T& t)
+ {
+ return t;
+ }
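+
+ // A minimal usage sketch (illustrative only): like asMatrix() for matrices,
+ // asVector() wraps a scalar as a one-entry vector view while forwarding
+ // proper vector types unchanged, e.g.
+ //
+ //   double x = 2.0;
+ //   auto v = asVector(x);            // v[0] refers to x
+ //   v[0] = 3.0;                      // writes through to x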
+
+} // end namespace Impl
+
+} // end namespace Dune
+
+#endif // DUNE_COMMON_SCALARVECTORVIEW_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_SHARED_PTR_HH
+#define DUNE_SHARED_PTR_HH
+
+#include <memory>
+
+#include <dune/common/typetraits.hh>
+/**
+ * @file
+ * @brief This file implements several utilities related to std::shared_ptr
+ * @author Markus Blatt
+ */
+namespace Dune
+{
+ /**
+ @brief implements the Deleter concept of shared_ptr without deleting anything
+ @relates shared_ptr
+
+ If you allocate an object on the stack, but want to pass it to a class or function as a shared_ptr,
+ you can use this deleter to avoid accidental deletion of the stack-allocated object.
+
+ For convenience we provide two free functions to create a shared_ptr from a stack-allocated object
+ (\see stackobject_to_shared_ptr):
+
+ 1) Convert a stack-allocated object to a shared_ptr:
+ @code
+ int i = 10;
+ std::shared_ptr<int> pi = stackobject_to_shared_ptr(i);
+ @endcode
+ 2) Convert a stack-allocated object to a std::shared_ptr of a base class
+ @code
+ class A {};
+ class B : public A {};
+
+ ...
+
+ B b;
+ std::shared_ptr<A> pa = stackobject_to_shared_ptr<A>(b);
+ @endcode
+
+ @tparam T type of the stack-allocated object
+ */
+ template<class T>
+ struct null_deleter
+ {
+ void operator() (T*) const {}
+ };
+
+ /**
+ @brief Create a shared_ptr for a stack-allocated object
+ @relatesalso null_deleter
+ @code
+ #include <dune/common/shared_ptr.hh>
+ @endcode
+
+ Usage:
+ @code
+ int i = 10;
+ std::shared_ptr<int> pi = stackobject_to_shared_ptr(i);
+ @endcode
+ The @c std::shared_ptr points to the object on the stack, but its deleter is
+ set to an instance of @c null_deleter so that nothing happens when the @c
+ shared_ptr is destroyed.
+
+ @sa null_deleter
+ */
+ template<typename T>
+ inline std::shared_ptr<T> stackobject_to_shared_ptr(T & t)
+ {
+ return std::shared_ptr<T>(&t, null_deleter<T>());
+ }
+
+
+ /**
+ * \brief Capture R-value reference to shared_ptr
+ *
+ * This will store a copy of the passed object in
+ * a shared_ptr.
+ *
+ * The two overloads of wrap_or_move are intended
+ * to capture references and temporaries in a unique
+ * way without creating copies and only moving if
+ * necessary.
+ *
+ * Be careful: Only use this function if you are
+ * aware of its implications. You can e.g. easily
+ * end up storing a reference to a temporary if
+ * you use this inside of another function without
+ * perfect forwarding.
+ */
+ template<class T>
+ auto wrap_or_move(T&& t)
+ {
+ return std::make_shared<std::decay_t<T>>(std::forward<T>(t));
+ }
+
+ /**
+ * \brief Capture L-value reference to std::shared_ptr
+ *
+ * This will store a pointer for the passed reference
+ * in a non-owning std::shared_ptr.
+ *
+ * The two overloads of wrap_or_move are intended
+ * to capture references and temporaries in a unique
+ * way without creating copies and only moving if
+ * necessary.
+ *
+ * Be careful: Only use this function if you are
+ * aware of its implications. You can e.g. easily
+ * end up storing a reference to a temporary if
+ * you use this inside of another function without
+ * perfect forwarding.
+ */
+ template<class T>
+ auto wrap_or_move(T& t)
+ {
+ return stackobject_to_shared_ptr(t);
+ }
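+
+ // A minimal usage sketch (illustrative only): the two overloads dispatch on
+ // the value category of the argument, e.g.
+ //
+ //   SomeType obj;
+ //   auto p1 = wrap_or_move(obj);            // non-owning, points to obj
+ //   auto p2 = wrap_or_move(SomeType{});     // owning, temporary is moved in
+ //
+ // SomeType is a placeholder for any copyable/movable user type.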
+
+}
+#endif
--- /dev/null
+#ifndef DUNE_COMMON_SIMD_HH
+#define DUNE_COMMON_SIMD_HH
+
+#warning dune/common/simd.hh is deprecated.
+#warning Use the new infrastructure from dune/common/simd/simd.hh instead.
+
+/**
+ \file
+
+ \brief Abstractions for support of dedicated SIMD data types
+
+ Libraries like Vc (https://github.com/VcDevel/Vc) add high-level
+ data types for SIMD (or vectorization) support in C++. Most of
+ these operations mimic the behavior of a numerical data type. Some
+ boolean operations, however, cannot be implemented in a way that is
+ compatible with trivial data types.
+
+ This header contains additional abstractions to help writing code
+ that works with trivial numerical data types (like double) and Vc
+ vectorization data types.
+
+ See also the conditional.hh and range_utils.hh headers.
+
+ \deprecated Use the newer simd architecture from dune/common/simd/simd.hh
+ instead.
+ */
+
+#include <cassert>
+#include <cstddef>
+#include <type_traits>
+#include <utility>
+
+#include <dune/common/conditional.hh>
+#include <dune/common/debugalign.hh>
+#include <dune/common/rangeutilities.hh>
+#if HAVE_VC
+// include Vc part of new simd interface to provide compatibility for
+// functionality that has been switched over.
+#include <dune/common/simd/vc.hh>
+#endif
+#include <dune/common/typetraits.hh>
+#include <dune/common/vc.hh>
+
+namespace Dune
+{
+
+#if HAVE_VC
+ namespace VcImpl {
+ //! A reference-like proxy for elements of random-access vectors.
+ /**
+ * This is necessary because Vc's lane-access operations return a proxy
+ * that cannot be constructed by non-Vc code (i.e. code that isn't
+ * explicitly declared `friend`). This means in particular that there
+ * is no copy/move constructor, meaning we cannot return such proxies
+ * from our own functions, such as `lane()`. To work around this, we
+ * define our own proxy class which internally holds a reference to the
+ * vector and a lane index.
+ */
+ template<class V>
+ class Proxy
+ {
+ static_assert(std::is_same<V, std::decay_t<V> >::value, "Class Proxy "
+ "may only be instantiated with unqualified types");
+ public:
+ using value_type = typename V::value_type;
+
+ private:
+ static_assert(std::is_arithmetic<value_type>::value,
+ "Only artihmetic types are supported");
+ V &vec_;
+ std::size_t idx_;
+
+ public:
+ Proxy(std::size_t idx, V &vec)
+ : vec_(vec), idx_(idx)
+ { }
+
+ operator value_type() const { return vec_[idx_]; }
+
+ // postfix operators
+
+ template<class T = value_type,
+ class = std::enable_if_t<!std::is_same<T, bool>::value> >
+ value_type operator++(int) { return vec_[idx_]++; }
+ template<class T = value_type,
+ class = std::enable_if_t<!std::is_same<T, bool>::value> >
+ value_type operator--(int) { return vec_[idx_]--; }
+
+ // unary (prefix) operators
+ template<class T = value_type,
+ class = std::enable_if_t<!std::is_same<T, bool>::value> >
+ Proxy &operator++() { ++(vec_[idx_]); return *this; }
+ template<class T = value_type,
+ class = std::enable_if_t<!std::is_same<T, bool>::value> >
+ Proxy &operator--() { --(vec_[idx_]); return *this; }
+ decltype(auto) operator!() const { return !(vec_[idx_]); }
+ decltype(auto) operator+() const { return +(vec_[idx_]); }
+ decltype(auto) operator-() const { return -(vec_[idx_]); }
+ template<class T = value_type,
+ class = std::enable_if_t<std::is_integral<T>::value> >
+ decltype(auto) operator~() const { return ~(vec_[idx_]); }
+
+ // binary operators
+#define DUNE_SIMD_VC_BINARY_OP(OP) \
+ template<class T> \
+ auto operator OP(T &&o) const \
+ -> decltype(vec_[idx_] OP valueCast(std::forward<T>(o))) \
+ { \
+ return vec_[idx_] OP valueCast(std::forward<T>(o)); \
+ } \
+ static_assert(true, "Require semicolon to unconfuse editors")
+
+ DUNE_SIMD_VC_BINARY_OP(*);
+ DUNE_SIMD_VC_BINARY_OP(/);
+ DUNE_SIMD_VC_BINARY_OP(%);
+
+ DUNE_SIMD_VC_BINARY_OP(+);
+ DUNE_SIMD_VC_BINARY_OP(-);
+
+ DUNE_SIMD_VC_BINARY_OP(<<);
+ DUNE_SIMD_VC_BINARY_OP(>>);
+
+ DUNE_SIMD_VC_BINARY_OP(<);
+ DUNE_SIMD_VC_BINARY_OP(>);
+ DUNE_SIMD_VC_BINARY_OP(<=);
+ DUNE_SIMD_VC_BINARY_OP(>=);
+
+ DUNE_SIMD_VC_BINARY_OP(==);
+ DUNE_SIMD_VC_BINARY_OP(!=);
+
+ DUNE_SIMD_VC_BINARY_OP(&);
+ DUNE_SIMD_VC_BINARY_OP(^);
+ DUNE_SIMD_VC_BINARY_OP(|);
+
+ DUNE_SIMD_VC_BINARY_OP(&&);
+ DUNE_SIMD_VC_BINARY_OP(||);
+#undef DUNE_SIMD_VC_BINARY_OP
+
+#define DUNE_SIMD_VC_ASSIGNMENT(OP) \
+ template<class T> \
+ auto operator OP(T &&o) \
+ -> std::enable_if_t<AlwaysTrue<decltype( \
+ vec_[idx_] OP valueCast(std::forward<T>(o)) \
+ )>::value, Proxy&> \
+ { \
+ vec_[idx_] OP valueCast(std::forward<T>(o)); \
+ return *this; \
+ } \
+ static_assert(true, "Require semicolon to unconfuse editors")
+
+ DUNE_SIMD_VC_ASSIGNMENT(=);
+ DUNE_SIMD_VC_ASSIGNMENT(*=);
+ DUNE_SIMD_VC_ASSIGNMENT(/=);
+ DUNE_SIMD_VC_ASSIGNMENT(%=);
+ DUNE_SIMD_VC_ASSIGNMENT(+=);
+ DUNE_SIMD_VC_ASSIGNMENT(-=);
+ DUNE_SIMD_VC_ASSIGNMENT(<<=);
+ DUNE_SIMD_VC_ASSIGNMENT(>>=);
+ DUNE_SIMD_VC_ASSIGNMENT(&=);
+ DUNE_SIMD_VC_ASSIGNMENT(^=);
+ DUNE_SIMD_VC_ASSIGNMENT(|=);
+#undef DUNE_SIMD_VC_ASSIGNMENT
+
+ // swap on proxies swaps the proxied vector entries. As such, it
+ // applies to rvalues of proxies too, not just lvalues
+ template<class V1, class V2>
+ friend void swap(Proxy<V1>, Proxy<V2>);
+
+ template<class T>
+ friend void swap(Proxy p1, T& s2)
+ {
+ // don't use swap() ourselves -- not supported by Vc 1.3.0 (but is
+ // supported by Vc 1.3.2)
+ T tmp = p1.vec_[p1.idx_];
+ p1.vec_[p1.idx_] = s2;
+ s2 = tmp;
+ }
+
+ template<class T>
+ friend void swap(T& s1, Proxy p2)
+ {
+ T tmp = s1;
+ s1 = p2.vec_[p2.idx_];
+ p2.vec_[p2.idx_] = tmp;
+ }
+ };
+
+ template<class V1, class V2>
+ void swap(Proxy<V1> p1, Proxy<V2> p2)
+ {
+ typename V1::value_type tmp = p1.vec_[p1.idx_];
+ p1.vec_[p1.idx_] = p2.vec_[p2.idx_];
+ p2.vec_[p2.idx_] = tmp;
+ }
+ } // namespace VcImpl
+#endif // HAVE_VC
+
+ template<typename T>
+ struct SimdScalarTypeTraits
+ {
+ using type = T;
+ };
+
+ template<typename T>
+ using SimdScalar = typename SimdScalarTypeTraits<T>::type;
+
+#if HAVE_VC
+ /*
+ Add Vc specializations for the SimdScalarTypeTraits traits class
+ */
+ template<typename T, typename A>
+ struct SimdScalarTypeTraits< Vc::Vector<T,A> >
+ {
+ using type = T;
+ };
+
+ template<typename T, std::size_t N, typename V, std::size_t M>
+ struct SimdScalarTypeTraits< Vc::SimdArray<T,N,V,M> >
+ {
+ using type = T;
+ };
+#endif // HAVE_VC
+
+ //! deduce the underlying scalar data type of an AlignedNumber
+ template<typename T, std::size_t align>
+ struct SimdScalarTypeTraits< AlignedNumber<T,align> >
+ {
+ using type = T;
+ };
+
+ template<typename V, typename = void>
+ struct SimdIndexTypeTraits {
+ using type = std::size_t;
+ };
+
+ //! A simd vector of indices corresponding to a simd vector V
+ /**
+ * lanes(T()) == lanes(SimdIndex<T>()) holds.
+ *
+ * \note The size of the elements of a SimdIndex isn't very well-defined.
+ * Be careful.
+ */
+ template<typename V>
+ using SimdIndex = typename SimdIndexTypeTraits<V>::type;
+
+#if HAVE_VC
+ template<typename T, typename A>
+ struct SimdIndexTypeTraits<Vc::Vector<T, A> > {
+ using type = typename Vc::Vector<T, A>::index_type;
+ };
+
+ template<typename T, std::size_t n, typename V>
+ struct SimdIndexTypeTraits<Vc::SimdArray<T, n, V> > {
+ using type = typename Vc::SimdArray<T, n, V>::index_type;
+ };
+#endif // HAVE_VC
+
+ template<typename V, typename = void>
+ struct SimdMaskTypeTraits {
+ using type = bool;
+ };
+
+ //! A simd vector of truth values corresponding to a simd vector V
+ /**
+ * lanes(T()) == lanes(SimdMask<T>()) holds.
+ */
+ template<typename V>
+ using SimdMask = typename SimdMaskTypeTraits<V>::type;
+
+#if HAVE_VC
+ template<typename T, typename A>
+ struct SimdMaskTypeTraits<Vc::Vector<T, A> > {
+ using type = typename Vc::Vector<T, A>::mask_type;
+ };
+
+ template<typename T, std::size_t n, typename V>
+ struct SimdMaskTypeTraits<Vc::SimdArray<T, n, V> > {
+ using type = typename Vc::SimdArray<T, n, V>::mask_type;
+ };
+#endif // HAVE_VC
+
+#if HAVE_VC
+ /*
+ Add Vc specializations for cond(), see conditional.hh
+ */
+ template<typename T, typename A>
+ Vc::Vector<T,A> cond(const Vc::Mask<T,A> & b,
+ const Vc::Vector<T,A> & v1,
+ const Vc::Vector<T,A> & v2)
+ {
+ return std::move(Vc::iif(b, v1, v2));
+ }
+
+ template<typename T, std::size_t N, typename V, std::size_t M>
+ Vc::SimdArray<T,N,V,M> cond(const typename Vc::SimdArray<T,N,V,M>::mask_type & b,
+ const Vc::SimdArray<T,N,V,M> & v1,
+ const Vc::SimdArray<T,N,V,M> & v2)
+ {
+ return std::move(Vc::iif(b, v1, v2));
+ }
+#endif // HAVE_VC
+
+#if HAVE_VC
+ /*
+ Add Vc specializations for several boolean operations, see rangeutilities.hh:
+
+ max_value, min_value, any_true, all_true
+ */
+ template<typename T, typename A>
+ T max_value(const Vc::Vector<T,A> & v)
+ {
+ return v.max();
+ }
+
+ template<typename T, std::size_t N, typename V, std::size_t M>
+ T max_value(const Vc::SimdArray<T,N,V,M> & v)
+ {
+ return v.max();
+ }
+
+ template<typename T, typename A>
+ T min_value(const Vc::Vector<T,A> & v)
+ {
+ return v.min();
+ }
+
+ template<typename T, std::size_t N, typename V, std::size_t M>
+ T min_value(const Vc::SimdArray<T,N,V,M> & v)
+ {
+ return v.min();
+ }
+
+ template<typename T, typename A>
+ bool any_true(const Vc::Mask<T,A> & v)
+ {
+ return Vc::any_of(v);
+ }
+
+ template<typename T, std::size_t N, typename V, std::size_t M>
+ bool any_true(const Vc::SimdMaskArray<T,N,V,M> & v)
+ {
+ return Vc::any_of(v);
+ }
+
+ template<typename T, typename A>
+ bool all_true(const Vc::Mask<T,A> & v)
+ {
+ return Vc::all_of(v);
+ }
+
+ template<typename T, std::size_t N, typename V, std::size_t M>
+ bool all_true(const Vc::SimdMaskArray<T,N,V,M> & v)
+ {
+ return Vc::all_of(v);
+ }
+#endif // HAVE_VC
+
+ //! get the number of lanes of a simd vector (scalar version)
+ template<class T>
+ std::size_t lanes(const T &) { return 1; }
+
+ //! access a lane of a simd vector (scalar version)
+ template<class T>
+ T lane(std::size_t l, const T &v)
+ {
+ assert(l == 0);
+ return v;
+ }
+
+ //! access a lane of a simd vector (scalar version)
+ template<class T>
+ T &lane(std::size_t l, T &v)
+ {
+ assert(l == 0);
+ return v;
+ }
+
+#if HAVE_VC
+ template<class T, class A>
+ std::size_t lanes(const Vc::Vector<T, A> &)
+ {
+ return Vc::Vector<T, A>::size();
+ }
+
+ template<class T, class A>
+ T lane(std::size_t l, const Vc::Vector<T, A> &v)
+ {
+ assert(l < lanes(v));
+ return v[l];
+ }
+
+ template<class T, class A>
+ auto lane(std::size_t l, Vc::Vector<T, A> &v)
+ {
+ assert(l < lanes(v));
+ return VcImpl::Proxy<Vc::Vector<T, A> >{l, v};
+ }
+
+ template<class T, std::size_t n, class V>
+ std::size_t lanes(const Vc::SimdArray<T, n, V> &)
+ {
+ return n;
+ }
+
+ template<class T, std::size_t n, class V>
+ T lane(std::size_t l, const Vc::SimdArray<T, n, V> &v)
+ {
+ assert(l < n);
+ return v[l];
+ }
+
+ template<class T, std::size_t n, class V>
+ auto lane(std::size_t l, Vc::SimdArray<T, n, V> &v)
+ {
+ assert(l < n);
+ return VcImpl::Proxy<Vc::SimdArray<T, n, V> >{l, v};
+ }
+
+ template<class T, std::size_t n, class V>
+ std::size_t lanes(const Vc::SimdMaskArray<T, n, V> &)
+ {
+ return n;
+ }
+
+ template<class T, std::size_t n, class V>
+ bool lane(std::size_t l, const Vc::SimdMaskArray<T, n, V> &v)
+ {
+ assert(l < n);
+ return v[l];
+ }
+
+ template<class T, std::size_t n, class V>
+ auto lane(std::size_t l, Vc::SimdMaskArray<T, n, V> &v)
+ {
+ assert(l < n);
+ return VcImpl::Proxy<Vc::SimdMaskArray<T, n, V> >{l, v};
+ }
+#endif // HAVE_VC
+
+ //! masked Simd assignment (scalar version)
+ /**
+ * Assign \c src to \c dest for those lanes where \c mask is true.
+ */
+ template<class T>
+ void assign(T &dst, const T &src, bool mask)
+ {
+ if(mask) dst = src;
+ }
+
+#if HAVE_VC
+ /*
+ Add Vc specializations for masked assignment
+ */
+ template<class T, class A>
+ void assign(Vc::Vector<T, A> &dst, const Vc::Vector<T, A> &src,
+ typename Vc::Vector<T, A>::mask_type mask)
+ {
+ dst(mask) = src;
+ }
+
+ template<class T, std::size_t n, class V>
+ void assign(Vc::SimdArray<T, n, V> &dst, const Vc::SimdArray<T, n, V> &src,
+ typename Vc::SimdArray<T, n, V>::mask_type mask)
+ {
+ dst(mask) = src;
+ }
+#endif // HAVE_VC
+
+ template<class T>
+ void swap(T &v1, T &v2, bool mask)
+ {
+ using std::swap;
+ if(mask) swap(v1, v2);
+ }
+
+#if HAVE_VC
+ /*
+ Add Vc specializations for masked swap
+ */
+ template<class T, class A>
+ void swap(Vc::Vector<T, A> &v1, Vc::Vector<T, A> &v2,
+ typename Vc::Vector<T, A>::mask_type mask)
+ {
+ auto tmp = v1;
+ v1(mask) = v2;
+ v2(mask) = tmp;
+ }
+
+ template<class T, std::size_t n, class V>
+ void swap(Vc::SimdArray<T, n, V> &v1, Vc::SimdArray<T, n, V> &v2,
+ typename Vc::SimdArray<T, n, V>::mask_type mask)
+ {
+ auto tmp = v1;
+ v1(mask) = v2;
+ v2(mask) = tmp;
+ }
+#endif // HAVE_VC
+
+} // end namespace Dune
+
+#endif // DUNE_COMMON_SIMD_HH
--- /dev/null
+add_subdirectory(test)
+
+if(NOT VC_FOUND)
+ exclude_dir_from_headercheck()
+endif()
+
+#install headers
+install(FILES
+ base.hh
+ defaults.hh
+ interface.hh
+ io.hh
+ loop.hh
+ simd.hh
+ standard.hh
+ test.hh # may be used from dependent modules
+ vc.hh
+DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dune/common/simd)
--- /dev/null
+This document is a collection of thoughts and rationales on a proper SIMD
+interface for Dune.
+
+What do we want?
+================
+
+We want an abstraction layer for SIMD libraries that allows the core library
+to handle simd-vector-like types where (until now) it could only handle scalar
+types. It is expected that those parts of the core library that want to
+support vector-like types will need some adaptation to account for corner
+cases that appear only when vectorizing. However, it should usually be
+unnecessary to maintain a scalar version of the code -- the vectorized version
+should be able to handle both vectorized and scalar data types.
+
+What this abstraction layer does not provide (at least initially) is a way to
+actually create vector types from scratch. It must, however, provide a way to
+create types corresponding to a type that already exists. I.e. if your code
+got a simd-vector-type argument, the abstraction layer will provide you with
+the number of lanes and the type of the entries. It will also provide you
+with a way to create simd types with the same number of lanes but a different
+entry type.
+
+Built-in Types
+==============
+
+We generally do not want to have to modify existing interfaces. This implies
+that the built-in types must be a valid "vectorization library" that the
+abstraction layer can deal with. Since the built-in types are not classes,
+this precludes certain idioms that are widespread among vectorization
+libraries.
+
+For instance if `x` is of a vector type, in many libraries one would access
+the `i`'th lane of `x` with the expression `x[i]`. We cannot support this
+expression if `x` is of a scalar built-in type, because we can overload
+`operator[]` only for class types. An alternative syntax is to use a
+free-standing access function `lane(i, x)`.
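+
+As a sketch (the names follow the free-function idea above and the interface
+described later in this document), generic code written against such a
+function works for built-in scalars and proper vector types alike:
+
+```c++
+// hypothetical overloads; the real interface is introduced below
+double &lane(std::size_t, double &x) { return x; }  // a scalar has one lane
+template<class V>
+auto lane(std::size_t i, V &x) -> decltype(x[i]) { return x[i]; }
+
+template<class T>
+void clearLane0(T &x) { lane(0, x) = 0; } // works for double and vector types
+```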
+
+Restriction on Vectorization Libraries
+======================================
+
+We generally expect vectorization libraries to provide all the usual operators
+(arithmetic operations, assignment, comparisons) for their vector types.
+Comparisons should yield mask types specific to that vectorization library;
+these must be summarized to `bool` with functions of the abstraction layer
+(like `anyTrue()`) before they can be used in `if`-conditions.
+
+We may require them to provide conversions from scalar types to vector types
+to some extent; however, an exact specification needs more experience.
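+
+For illustration, a sketch of how a comparison result is meant to be consumed
+(`anyTrue()` being one of the summarizing functions of the abstraction layer):
+
+```c++
+auto m = v1 < v2;     // mask type specific to the vectorization library
+if(anyTrue(m)) {      // summarize to bool before using it in control flow
+  // at least one lane compared true
+}
+// "if(m)" itself is not required to work for multi-lane mask types
+```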
+
+Specifically for vectors (or masks) `v1` and `v2` of type `V`, with associated
+scalar type `T=Scalar<V>`, we require
+
+- for any unary arithmetic expression `@v1` (where `@` is one of `+`, `-`, or
+ `~`):
+ `lane(l,@v1) == T(@lane(l,v1))` for all `l`
+ there are no side-effects
+
+- for any binary arithmetic expression `v1@v2` (where `@` is one of `+`, `-`,
+ `*`, `/`, `%`, `<<`, `>>`, `&`, `|`, `^`):
+ `lane(l,v1@v2) == T(lane(l,v1)@lane(l,v2))` for all `l`
+ there are no side-effects
+
+- for any compound assignment expression `v1@=v2` (where `@` is one of `+`,
+ `-`, `*`, `/`, `%`, `<<`, `>>`, `&`, `|`, `^`):
+ `v1@=v2` has the same side-effects as `lane(l,v1)@=lane(l,v2)` for all `l`
+ the result of `v1@=v2` is an lvalue denoting `v1`
+
+- for any comparison expression `v1@v2` (where `@` is one of `==`, `!=`, `<`,
+ `<=`, `>` or `>=`):
+ `lane(l,v1@v2) == lane(l,v1)@lane(l,v2)` for all `l`
+ The result of `v1@v2` is a prvalue of type `Mask<V>`
+ there are no side-effects
+
+- for the unary logic expression `!v1`:
+ `lane(l,!v1) == !lane(l,v1)` for all `l`
+ The result of `!v1` is a prvalue of type `Mask<V>`
+ there are no side-effects
+
+- for any binary logic expression `v1@v2` (where `@` is one of `&&` or `||`):
+ `lane(l,v1@v2) == lane(l,v1)@lane(l,v2)` for all `l`
+ The result of `v1@v2` is a prvalue of type `Mask<V>`
+ there are no side-effects
+
+Note 1: Short-circuiting may or may not happen for `&&` and `||` -- it will
+happen for the built-in types, but it cannot happen for proper multi-lane simd
+types.
+
+Note 2: For all expressions there is a lane-wise equality requirement with the
+scalar operation. This requirement is formulated such that promotions of
+arguments are permitted, but not required. This is necessary to allow both
+the built-in types (which are promoted) and proper simd types (which typically
+are not promoted to stay within the same simd register).
+
+Note 3: The `==` in the lane-wise equality requirement may be overloaded to
+account for proxies returned by `lane()`.
+
+Note 4: Any expression that is invalid for the scalar case is not required for
+the simd case either.
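+
+As a concrete sketch of the lane-wise requirement for a binary `+` (the cast
+to `T` is what makes the requirement hold whether or not the library promotes
+its arguments):
+
+```c++
+// v1, v2 of vector type V, T = Scalar<V>
+for(std::size_t l = 0; l < lanes(v1); ++l)
+  assert(lane(l, v1 + v2) == T(lane(l, v1) + lane(l, v2)));
+```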
+
+`#include` Structure
+====================
+
+There will be one header that ensures that the interface names are available.
+This include also pulls in the part of the abstraction layer that enables use
+of the built-in scalar types. Any code that only makes use of the abstraction
+layer needs to include this header, and only this header.
+
+Any compilation unit (generally a `.cc`-file) that creates vectorized types
+(other than the scalar built-in types) using some vectorization library, and
+hands those types to vectorization-enabled dune code, is responsible for
+
+1. including the necessary headers providing the abstraction for that
+ vectorization library, as specified in the documentation of the
+ abstraction, and
+
+2. making sure the compilation happens with all the compiler/linker settings
+ (flags, defines, libraries) needed by the vectorization library.
+
+The ADL-Problem
+===============
+
+Consider the following example of a vectorization of
+`Dune::FooVector::two_norm()`, which is implemented in
+`dune/common/foovector.hh`:
+
+```c++
+// SIMD interface and implementation for scalar built-ins
+#include <dune/common/simd/interface.hh>
+
+namespace Dune {
+ template<class T>
+ class FooVector
+ {
+ T data_[FOO];
+ public:
+ T two_norm2() const {
+ using Dune::Simd::lane;
+ using Dune::Simd::lanes;
+ T sum(0);
+ for(const auto &entry : data_)
+ {
+ // break vectorization for demonstration purposes
+ for(std::size_t l = 0; l < lanes(entry); ++l)
+ lane(l, sum) += lane(l, entry) * lane(l, entry);
+ }
+ return sum;
+ }
+ };
+}
+```
+
+This can then be used like this:
+
+```c++
+#include <dune/common/foovector.hh>
+// provide dune-abstraction for mysimdlib
+// also pulls in the necessary includes for mysimdlib
+#include <dune/common/simd/mysimdlib.hh>
+
+int main()
+{
+ using T = mysimdlib::Vector;
+ Dune::FooVector<T> x(T(0));
+ x.two_norm2();
+}
+```
+
+This will not work. At least not with a straightforward implementation of
+`lane()` and `lanes()`, where `dune/common/simd/mysimdlib.hh` simply puts
+overloads into the `Dune::Simd` namespace. Here's why.
+
+The compiler has several ways to find functions that are not qualified by
+namespaces (or something similar). One way is unqualified lookup: the
+compiler looks for functions that are in some enclosing scope _at the time the
+template containing the function call is read_ (early binding). Another is
+argument-dependent lookup (ADL): the compiler looks for functions in the
+namespaces associated with the types of its arguments _at the time the
+function call is instantiated_ (late binding).
+
+In the example above, `T` a.k.a. `mysimdlib::Vector` is defined in the
+namespace `mysimdlib`, which is unlikely to contain the functions `lane()` and
+`lanes()`. The abstraction layer could put overloads of those functions into
+that namespace, but, well, you're not supposed to meddle in foreign namespaces
+unless given special permission. As a consequence, `lane()` and `lanes()`
+cannot be found by ADL.
+
+And they cannot be found by any other lookup either. After preprocessing the
+compiler will see something like this:
+
+```c++
+// from dune/common/simd/interface.hh
+namespace Dune::Simd {
+ std::size_t lanes(double) { return 1; }
+ double lane(std::size_t, double v) { return v; }
+ double& lane(std::size_t, double& v) { return v; }
+}
+
+// from dune/common/foovector.hh
+namespace Dune {
+ template<class T>
+ class FooVector
+ {
+ T data_[FOO];
+ public:
+ T two_norm2() const {
+ using Dune::Simd::lane;
+ using Dune::Simd::lanes;
+ T sum(0);
+ for(const auto &entry : data_)
+ {
+ // break vectorization for demonstration purposes
+ for(std::size_t l = 0; l < lanes(entry); ++l)
+ lane(l, sum) += lane(l, entry) * lane(l, entry);
+ }
+ return sum;
+ }
+ };
+}
+
+// from some mysimdlib-specific header
+namespace mysimdlib {
+ class Vector { /*...*/ };
+}
+
+// from dune/common/simd/mysimdlib.hh
+namespace Dune::Simd {
+ std::size_t lanes(mysimdlib::Vector v);
+ double lane(std::size_t, mysimdlib::Vector);
+ double& lane(std::size_t, mysimdlib::Vector&);
+}
+
+// from myprog.cc
+int main()
+{
+ using T = mysimdlib::Vector;
+ Dune::FooVector<T> x(T(0));
+ x.two_norm2();
+}
+```
+
+At the time when the definition of `Dune::FooVector::two_norm2()` is read,
+only the declarations for `lane()` and `lanes()` for scalar built-in types are
+visible. By the time `Dune::FooVector<mysimdlib::Vector>::two_norm2()` is
+instantiated, the proper declarations for `lane()` and `lanes()` are visible.
+But that is too late, because unqualified lookup does early binding. It would
+be OK for late binding, but only ADL does that, and ADL does not work as noted
+above.
+
+Note that ADL is the _only_ type of lookup that does late binding. So we
+cannot simply require the user to use another type of lookup.
+
+Working around the ADL Problem
+==============================
+
+To get around the ADL problem, we can attempt the following:
+
+```c++
+// dune/common/simd/interface.hh
+
+namespace Dune::Simd {
+ namespace Overloads {
+ struct ADLTag {};
+ }
+
+ template<class T>
+ std::size_t lanes(T v)
+ {
+ return lanes(Overloads::ADLTag(), v);
+ }
+
+ template<class T>
+ auto lane(std::size_t i, T v)
+ {
+ return lane(Overloads::ADLTag(), i, v);
+ }
+
+ //...
+
+ // implementation for scalar built-ins
+ namespace Overloads {
+ std::size_t lanes(ADLTag, double) { return 1; }
+ double lane(ADLTag, std::size_t, double v) { return v; }
+ // etc...
+ }
+}
+```
+
+And for each vectorization library:
+```c++
+// dune/common/simd/mysimdlib.hh
+
+#include <mysimdlib.h>
+
+#include <dune/common/simd/interface.hh>
+
+namespace Dune::Simd::Overloads {
+ std::size_t lanes(ADLTag, mysimdlib::Vector v);
+ double lane(ADLTag, std::size_t, mysimdlib::Vector v);
+ // etc...
+}
+```
+
+Core Dune code can then use the functions in `Dune::Simd` without
+restrictions. These functions themselves make sure to find the implementation
+functions via ADL, so that the lookup uses late binding and thus can find
+functions that are declared later.
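+
+Continuing the `mysimdlib` sketch from above, the effect is that code like the
+following now compiles and dispatches correctly, even though the
+`mysimdlib`-specific overloads only become visible after the generic code has
+been read:
+
+```c++
+#include <dune/common/simd/interface.hh>  // only built-in overloads known here
+
+template<class V>
+auto firstLane(V v)
+{
+  return Dune::Simd::lane(0, v);  // forwards to Overloads::lane() via ADL
+}
+
+#include <dune/common/simd/mysimdlib.hh>  // adds the mysimdlib overloads
+
+int main()
+{
+  firstLane(1.0);                  // uses the built-in overload
+  firstLane(mysimdlib::Vector{});  // finds the mysimdlib overload (late binding)
+}
+```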
--- /dev/null
+#ifndef DUNE_COMMON_SIMD_BASE_HH
+#define DUNE_COMMON_SIMD_BASE_HH
+
+/** @file
+ * @brief Basic definitions for SIMD Implementations
+ * @ingroup SIMDAbstract
+ *
+ * This file provides basic definitions and template declarations that are
+ * used to write SIMD abstraction layers.
+ *
+ * This file should never be included by users of the SIMD
+ * abstraction. Include <dune/common/simd/simd.hh> instead.
+ */
+
+/** @defgroup SIMD Vectorization
+ * @ingroup Common
+ * @brief Abstractions for using vectorization libraries
+ *
+ * This vectorization abstraction targets three kinds of developers:
+ *
+ * - Application developers create SIMD types (usually with the help of some
+ * vectorization library) and pass them to the Dune library. They are
+ * responsible for a compilation unit, typically a .cc file that is compiled
+ * into a program or part of a library. Since they create the type, they
+ * know which library abstraction is needed and are responsible for
+ * including it, as well as making sure the correct compiler flags are
+ * provided.
+ *
+ * - Library developers implement support in Dune for handling SIMD types,
+ * e.g. by extending some existing class. By using the interfaces provided
+ * here, they should not have to worry about the exact vectorization library
+ * being used, or whether a vectorization library is used at all.
+ *
+ * - Abstraction developers provide the necessary hooks to make a
+ * vectorization library known to this interface. They are also responsible
+ * for documenting for application developers how to meet the prerequisites
+ * for using the abstraction, e.g. which headers to include and how to add
+ * the necessary compiler flags.
+ */
+
+/** @defgroup SIMDApp Application Developer's Interface
+ * @ingroup SIMD
+ * @brief How to request vectorization from Dune
+ *
+ * This module describes how to pass vectorized types to Dune classes. It
+ * lists the supported vectorization libraries and how to include each
+ * (although it cannot list those libraries where support is not part of the
+ * dune core).
+ */
+
+/** @defgroup SIMDLib Library Developer's Interface
+ * @ingroup SIMD
+ * @brief How to support vectorization in Dune classes
+ *
+ * This module describes how a Dune library developer can add support for
+ * vectorization to library facilities.
+ */
+
+/** @defgroup SIMDAbstract Abstraction Developer's Interface
+ * @ingroup SIMD
+ * @brief How to add support for a new vectorization library
+ *
+ * This module describes the interface that you must implement if you want to
+ * provide an abstraction layer for some vectorization library. To understand
+ * some of the design choices, have a look at dune/common/simd/DESIGN.md in
+ * dune-common's source.
+ *
+ * Everything an abstraction implementation needs to provide is in namespace
+ * `Dune::Simd::Overloads`.
+ *
+ * An implementation must specialize all the template classes in namespace
+ * `Overloads` (with the exception of `Overloads::ADLTag`, see below). To
+ * make it possible for certain specializations not to participate in overload
+ * resolution, each template class provides a dummy template parameter \c
+ * SFINAETag that defaults to \c void.
+ *
+ * An implementation must overload all functions within namespace `Overloads`
+ * that are defined as deleted. It may overload other functions if the default
+ * behaviour is not suitable. All functions take a value of type
+ * `Overloads::ADLTag<priority, true>` as their first argument to enable
+ * argument-dependent lookup, to be able to prioritize different overloads
+ * with respect to each other, and to be able to inhibit certain overloads
+ * from taking part in overload resolution. See the documentation for
+ * `Overloads::ADLTag` for a detailed explanation.
+ *
+ * An abstraction implementation may not specialize `Overloads::ADLTag`, and
+ * may not introduce new names into namespace `Overloads`.
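+ *
+ * As a rough sketch (all `mylib` names are hypothetical, only a small subset
+ * of the required specializations and overloads is shown, and a real
+ * implementation would rebind `double` back to `mylib::Vec4d` itself), an
+ * abstraction for a four-lane double vector could start like this:
+ * \code
+ * namespace Dune::Simd::Overloads {
+ *   template<>
+ *   struct ScalarType<mylib::Vec4d> { using type = double; };
+ *
+ *   template<class S>
+ *   struct RebindType<S, mylib::Vec4d> { using type = LoopSIMD<S, 4>; };
+ *
+ *   template<>
+ *   struct LaneCount<mylib::Vec4d> : index_constant<4> {};
+ *
+ *   double lane(ADLTag<5>, std::size_t l, const mylib::Vec4d &v);
+ *   double &lane(ADLTag<5>, std::size_t l, mylib::Vec4d &v);
+ * }
+ * \endcode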
+ */
+
+namespace Dune {
+ namespace Simd {
+
+ //! @brief Namespace for the overloads and specializations that make up a
+ //! SIMD implementation
+ /**
+ * @ingroup SIMDAbstract
+ *
+ * This namespace contains three sets of things: the struct ADLTag, which
+ * is used to look up functions in this namespace using argument-dependent
+ * lookup, traits classes that must be specialized by abstraction
+ * implementations, and functions that must/can be overloaded by
+ * abstraction implementations.
+ *
+ * \note Only introduce new names into this namespace to extend the
+ * interface. This applies in particular to people in the
+ * "abstraction developer" role; they may meddle in this namespace
+ * only by providing overloads and/or specializations for existing
+ * names (and for `ADLTag` even that is prohibited).
+ */
+ namespace Overloads {
+
+ //! @addtogroup SIMDAbstract
+ //! @{
+
+ //! Tag used to force late-binding lookup in Dune::Simd::Overloads
+ /**
+ * This tag is used by functions in \c Dune::Simd to make
+ * argument-dependent lookups (ADL) for functions in \c
+ * Dune::Simd::Overloads. The property of ADL that is used here is that
+ * it binds the names of functions late, i.e. at the time of
+ * instantiation, while all other lookups bind early, i.e. at the time
+ * when the function call is parsed. Using late binding enables a
+ * function \c foo() to find a function \c Overloads::foo() that has
+ * been declared only after \c foo() itself has been defined:
+ *
+ * \code
+ * template<class V>
+ * void foo(V v)
+ * {
+ * foo(Overloads::ADLTag<6>{}, v);
+ * }
+ *
+ * struct MyType {};
+ * namespace Overloads {
+ * void foo(ADLTag<4>, MyType v);
+ * }
+ * \endcode
+ *
+ * \note It is generally an error to declare a function with an ADLTag
+ * argument outside of namespace Simd::Overloads. An exception
+ * would be an abstraction implementation that declares all its
+ * implementation functions in its own implementation namespace,
+ * and then pulls them into the namespace Overloads by way of \c
+ * using.
+ *
+ * `ADLTag<i>` derives from `ADLTag<i-1>`. Thus it is possible to
+ * prioritize overloads by choosing an appropriate \c i. The following
+ * values for \c i are predefined:
+ * - `i==0,1`: these are reserved for the defaults.
+ * - `i==2,3`: these are reserved for the implementation for standard
+ * types.
+ * - `i==5,6`: these should normally be used by other implementations
+ *
+ * The lower priority should be used by default. The higher priority
+ * can be used by an implementation to resolve ambiguities, e.g. between
+ * an overload with a by-value argument and an overload with an
+ * lvalue-reference argument.
+ *
+ * The following priorities should not normally be used. However, they
+ * may sometimes be necessary:
+ * - \c i==4: override standard implementation, but prefer other
+ * implementations
+ * - \c i==7: try to override other implementations
+ *
+ * \c i==7 is the highest supported priority.
+ *
+ * The second (bool) template argument is to make writing abstraction
+ * implementations that use SFINAE to remove (some of) their functions
+ * from the overload set more concise. \c ADLTag<i,false> is not
+ * defined, so instead of
+ * \code
+ * std::enable_if_t<cond, ADLTag<4> >
+ * \endcode
+ * you may write the equivalent
+ * \code
+ * ADLTag<4, cond>
+ * \endcode
+ */
+ template<unsigned i, bool = true>
+ struct ADLTag;
+
+ template<unsigned i>
+ struct ADLTag<i> : ADLTag<i-1> {};
+
+ template<>
+ struct ADLTag<0> {};
+
+ //! should have a member type \c type
+ /**
+ * Implements `Simd::Scalar`. `V` will never have cv or reference
+ * qualifiers, no need to strip those.
+ */
+ template<class V, class SFINAETag = void>
+ struct ScalarType;
+
+ //! should have a member type \c type
+ /**
+ * Implements `Simd::Rebind`. `V` and `S` will never have cv or
+ * reference qualifiers, no need to strip those.
+ */
+ template<class S, class V, class SFINAETag = void>
+ struct RebindType;
+
+ //! should be derived from a `Dune::index_constant`
+ /**
+ * Implements `Simd::lanes()`. `V` will never have cv or reference
+ * qualifiers, no need to strip those.
+ */
+ template<class V, class SFINAETag = void>
+ struct LaneCount;
+
+ //! @} Group SIMDAbstract
+
+ } // namespace Overloads
+ } // namespace Simd
+} // namespace Dune
+
+#endif // DUNE_COMMON_SIMD_BASE_HH
--- /dev/null
+#ifndef DUNE_COMMON_SIMD_DEFAULTS_HH
+#define DUNE_COMMON_SIMD_DEFAULTS_HH
+
+/** @file
+ * @brief Default implementations for SIMD Implementations
+ * @ingroup SIMDAbstract
+ *
+ * This file provides default overloads for SIMD implementation functions, and
+ * deleted placeholders where there are no default implementations.
+ *
+ * This file should never be included by users of the SIMD
+ * abstraction. Include <dune/common/simd/simd.hh> instead.
+ */
+
+#include <algorithm>
+#include <cstddef>
+#include <type_traits>
+
+#include <dune/common/rangeutilities.hh>
+#include <dune/common/simd/base.hh>
+#include <dune/common/simd/interface.hh>
+#include <dune/common/typelist.hh>
+#include <dune/common/typetraits.hh>
+
+namespace Dune {
+ namespace Simd {
+ namespace Overloads {
+
+ /**
+ * @addtogroup SIMDAbstract
+ * @{
+ */
+
+ /** @name Overloadable and default functions
+ *
+ * This group contains functions that you, as an abstraction developer,
+ * must implement. All functions that are deleted must be provided;
+ * functions that have a default implementation may be left
+ * unimplemented if the default behaviour is satisfactory.
+ *
+ * @{
+ */
+
+ //! implements Simd::lane()
+ template<class V>
+ decltype(auto) lane(ADLTag<0>, std::size_t l, V v) = delete;
+
+ //! implements Simd::implCast<V>(V)
+ template<class V>
+ constexpr V implCast(ADLTag<0>, MetaType<V>, const V &u)
+ {
+ return u;
+ }
+
+ //! implements Simd::implCast<V>(U)
+ template<class V, class U>
+ constexpr V implCast(ADLTag<0>, MetaType<V>, const U &u)
+ {
+ V result(Simd::Scalar<V>(0));
+ for(auto l : range(Simd::lanes(u)))
+ Simd::lane(l, result) = Simd::lane(l, u);
+ return result;
+ }
+
+ //! implements Simd::broadcast<V>()
+ template<class V, class S>
+ auto broadcast(ADLTag<0>, MetaType<V>, S s)
+ {
+ return V(Simd::Scalar<V>(s));
+ }
+
+ //! implements Simd::cond()
+ template<class V>
+ V cond(ADLTag<0>, const Mask<V> &mask,
+ const V &ifTrue, const V &ifFalse) = delete;
+
+ //! implements binary Simd::max()
+ template<class V>
+ auto max(ADLTag<0>, const V &v1, const V &v2)
+ {
+ using std::max;
+ return max(v1, v2);
+ }
+
+ //! implements binary Simd::min()
+ template<class V>
+ auto min(ADLTag<0>, const V &v1, const V &v2)
+ {
+ using std::min;
+ return min(v1, v2);
+ }
+
+ //! implements Simd::anyTrue()
+ template<class Mask>
+ bool anyTrue(ADLTag<0>, const Mask &mask) = delete;
+
+ //! implements Simd::allTrue()
+ /**
+ * Default uses Simd::anyTrue()
+ */
+ template<class Mask>
+ bool allTrue(ADLTag<0>, const Mask &mask)
+ {
+ return !Dune::Simd::anyTrue(!mask);
+ }
+
+ //! implements Simd::anyFalse()
+ /**
+ * Default uses Simd::anyTrue()
+ */
+ template<class Mask>
+ bool anyFalse(ADLTag<0>, const Mask &mask)
+ {
+ return Dune::Simd::anyTrue(!mask);
+ }
+
+ //! implements Simd::allFalse()
+ /**
+ * Default uses Simd::anyTrue()
+ */
+ template<class Mask>
+ bool allFalse(ADLTag<0>, const Mask &mask)
+ {
+ return !Dune::Simd::anyTrue(mask);
+ }
+
+ //! implements unary (horizontal) Simd::max()
+ template<class V>
+ auto max(ADLTag<0>, const V &v)
+ {
+ Scalar<V> m = Simd::lane(0, v);
+ for(std::size_t l = 1; l < Simd::lanes(v); ++l)
+ if(m < Simd::lane(l, v))
+ m = Simd::lane(l, v);
+ return m;
+ }
+
+ //! implements unary (horizontal) Simd::min()
+ template<class V>
+ auto min(ADLTag<0>, const V &v)
+ {
+ Scalar<V> m = Simd::lane(0, v);
+ for(std::size_t l = 1; l < Simd::lanes(v); ++l)
+ if(Simd::lane(l, v) < m)
+ m = Simd::lane(l, v);
+ return m;
+ }
+
+ //! implements Simd::mask()
+ template<class V>
+ Mask<V> mask(ADLTag<0, std::is_same<V, Mask<V> >::value>,
+ const V &v)
+ {
+ return v;
+ }
+
+ //! implements Simd::mask()
+ template<class V>
+ auto mask(ADLTag<0, !std::is_same<V, Mask<V> >::value>,
+ const V &v)
+ {
+ using Copy = AutonomousValue<V>; // just in case we are handed a proxy
+ return v != Copy(Scalar<Copy>(0));
+ }
+
+ //! implements Simd::maskOr()
+ template<class V1, class V2>
+ auto maskOr(ADLTag<0>, const V1 &v1, const V2 &v2)
+ {
+ return Simd::mask(v1) || Simd::mask(v2);
+ }
+
+ //! implements Simd::maskAnd()
+ template<class V1, class V2>
+ auto maskAnd(ADLTag<0>, const V1 &v1, const V2 &v2)
+ {
+ return Simd::mask(v1) && Simd::mask(v2);
+ }
+
+ //! @} Overloadable and default functions
+ //! @} Group SIMDAbstract
+ } // namespace Overloads
+ } // namespace Simd
+} // namespace Dune
+
+#endif // DUNE_COMMON_SIMD_DEFAULTS_HH
--- /dev/null
+#ifndef DUNE_COMMON_SIMD_INTERFACE_HH
+#define DUNE_COMMON_SIMD_INTERFACE_HH
+
+/** @file
+ * @brief User interface of the SIMD abstraction
+ * @ingroup SIMDLib
+ *
+ * This file provides the user interface functions of the SIMD abstraction
+ * layer.
+ *
+ * This file should never be included by users of the SIMD
+ * abstraction. Include <dune/common/simd/simd.hh> instead.
+ */
+
+#include <cassert>
+#include <cstddef>
+#include <type_traits>
+#include <utility>
+
+#include <dune/common/simd/base.hh>
+#include <dune/common/typelist.hh>
+
+namespace Dune {
+
+ //! @brief Namespace for vectorization interface functions used by library
+ //! developers
+ /**
+ * @ingroup SIMDLib
+ */
+ namespace Simd {
+
+ /** @addtogroup SIMDLib
+ *
+ * @{
+ *
+ * @section understand_simd Understanding SIMD types
+ *
+ * The (idealized) model of a SIMD type `V` used in this abstraction layer
+ * is that of a fixed-length vector of some scalar type `S`.
+ * Operations and operators that take values of type `S` as arguments,
+ * except for `operator,()`, should be overloaded to support values of
+ * type `V` too. These operations should apply element-wise. If the
+ * operation takes more than one argument, it should accept arbitrary
+ * combinations of `V` and `S`. The exception is the combination of `S`
+ * on the left hand side and `V` on the right hand side of one of the
+ * assignment operators, which does not make sense.
+ *
+ * The result of a boolean operation is a mask type `M`, which is a SIMD
+ * type with scalar type `bool` with the same number of elements as `V`.
+ * The result of all other operations is again of type `V`, or of some
+ * type convertible to `V`.
+ *
+ * This is very similar to `std::valarray`, with the main difference
+ * being that `std::valarray` is dynamic in size, while for this
+ * abstraction the size is static.
+ *
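+ * In code, the model reads roughly like this (`V`, `S` and the mask type
+ * `M` as described above; a sketch, not a prescribed API):
+ * \code
+ * V v1, v2;           // SIMD vectors with scalar type S
+ * S s;
+ * V r = v1 * s + v2;  // element-wise, V and S may be mixed freely
+ * M m = (r > v1);     // comparisons yield the mask type
+ * \endcode
+ *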
+ * @section SIMDLibPromoWarn Type promotion issues
+ *
+ * True SIMD types have an issue with type promotion, which means they
+ * cannot behave completely analogous to built-in integral types (this is
+ * a non-issue with floating point types). Essentially, operations on
+ * true SIMD types cannot promote their arguments, because the promoted
+ * types typically require more storage than the original types, meaning
+ * an argument that was passed in a single vector register would need
+ * multiple vector registers after promotion, which would mean greater
+ * register pressure. Also, there would be conversion operations
+ * required, which (at least on x86) is not typically the case for
+ * promotions of the built-in types. Lastly, with larger types the vector
+ * units can typically operate on fewer lanes at a time.
+ *
+ * Omitting integral promotions has in many cases no negative impact,
+ * because many programmers do not really expect them anyway. There are
+ * however cases where they matter, and for illustration I want to explain
+ * one that cropped up during unit testing.
+ *
+ * Here is a simplified (and somewhat pseudo-code) version of the test.
+ * The test checks the result of unary `-` on `Vc::Vector<unsigned short>`
+ * by comparing the result of unary `-` when applied to the complete
+ * vector to the result of unary `-` when applied to each lane
+ * individually.
+ * \code
+ * Vc::Vector<unsigned short> varg;
+ * for(std::size_t l = 0; l < lanes(varg); ++l)
+ * lane(l, varg) = l + 1;
+ * auto vresult = -varg;
+ * for(std::size_t l = 0; l < lanes(varg); ++l)
+ * assert(lane(l, vresult) == -lane(l, varg));
+ * \endcode
+ * The test fails in lane 0. On the left side of the `==`, `lane(0,
+ * vresult)` is `(unsigned short)65535`, which is the same as `(unsigned
+ * short)-1`, as it should be. On the right side, `lane(0, varg)` is
+ * `(unsigned short)1`. `-` promotes its argument, so that becomes
+ * `(int)1`, and the result of the negation is `(int)-1`.
+ *
+ * Now the comparison is `(unsigned short)65535 == (int)-1`. The
+ * comparison operator applies the *usual arithmetic conversions* to bring
+ * both operands to the same type. In this case this boils down to
+ * converting the left side to `int` via integral promotions and the
+ * comparison becomes `(int)65535 == (int)-1`. The result is of course
+ * `false` and the assertion triggers.
+ *
+ * The only way to thoroughly prevent this kind of problem is to convert
+ * the result of any operation back to the expected type. In the above
+ * example, the assertion would need to be written as `assert(lane(l,
+ * vresult) == static_cast<unsigned short>(-lane(l, varg)));`. In
+ * practice, this should only be a problem with operations on unsigned
+ * types where the result may be "negative". Most code in Dune will want
+ * to operate on floating point types, where this is a non-issue.
+ *
+ * (Of course, this is also a problem for code that operates on untrusted
+ * input, but you should not be doing that with Dune anyway.)
+ *
+ * Still, when writing code using the SIMD abstractions, you should be
+ * aware that in the following snippet
+ * \code
+ * auto var1 = lane(0, -vec);
+ * auto var2 = -lane(0, vec);
+ * \endcode
+ * the exact types of `var1` and `var2` may be somewhat surprising.
+ *
+ * @section simd_abstraction_limit Limitations of the Abstraction Layer
+ *
+ * Since the abstraction layer cannot overload operators of SIMD types
+ * (that would be meddling with the domain of the library that provides
+ * the SIMD types), nor provide its own constructors, there are severe
+ * limitations in what the abstraction layer guarantees. Besides the
+ * standard types, the first SIMD library supported is Vc, so that is
+ * where most of the limitations stem from; see \ref SIMDVcRestrictions in
+ * \ref SIMDVc.
+ *
+ * The biggest limitations are with masks. In Vc masks support a very
+ * restricted set of operations compared to other SIMD types, so in what
+ * follows we will distinguish between masks with a very small set of
+ * operations and between vectors with a larger set of operations.
+ *
+ * Here is a compact table of the limitations as a quick reference,
+ * together with suggested workarounds for the constructs that don't work.
+ * `s` denotes a scalar object/expression (i.e. of type `double` or in the
+ * case of masks `bool`). `v` denotes a vector/mask object/expression.
+ * `sv` means that both scalar and vector arguments are accepted. `V`
+ * denotes a vector/mask type. `@` means any applicable operator that is
+ * not otherwise listed.
+ *
+ * <!-- The following table is in orgtbl format -- If you are using emacs,
+ * you may want to enable the `orgtbl` minor mode. We substitute `|`
+ * with `¦` when describing or-operators so as to not confuse
+ * orgtbl. -->
+ * \code
+ | | Vectors | workaround | Masks | workaround |
+ |-------------------------+---------+----------------------------+-------------+------------------|
+ | V v(s); | y | | y | |
+ | V v = s; | y | V v(s); | *N* | V v(s); |
+ | V v{s}; | *N* | V v(s); | y | V v(s); |
+ | V v = {s}; | *N* | V v(s); | y | V v(s); |
+ |-------------------------+---------+----------------------------+-------------+------------------|
+ | v = s; | y | v = V(s); | *N* | v = V(s); |
+ | v = {s}; | *N* | v = V(s); | *N* | v = V(s); |
+ |-------------------------+---------+----------------------------+-------------+------------------|
+ | v++; ++v; | *N* | v += Scalar<V>(1); | *N*(n/a)[2] | v = V(true); |
+ | v--; --v; | *N* | v -= Scalar<V>(1); | n/a | |
+ |-------------------------+---------+----------------------------+-------------+------------------|
+ | +v; -v; | y | | *N* | none |
+ | !v; | y | | y | |
+ | ~v; | y | | *N* | none |
+ |-------------------------+---------+----------------------------+-------------+------------------|
+ | sv @ sv; but see below | y | | *N* | none |
+ |-------------------------+---------+----------------------------+-------------+------------------|
+ | s << v; s >> v; | *N* | v << V(s); | *N* | none |
+ |-------------------------+---------+----------------------------+-------------+------------------|
+ | v == v; v != v; | y | | *N* [1] | !(v ^ v); v ^ v; |
+ |-------------------------+---------+----------------------------+-------------+------------------|
+ | v & v; v ^ v; v ¦ v; | y | | y | |
+ | v && v; v ¦¦ v; | *N* | maskAnd(v,v); maskOr(v,v); | y | |
+ |-------------------------+---------+----------------------------+-------------+------------------|
+ | v @= sv; but see below | y | | *N* | none |
+ | v &= v; v ^= v; v ¦= v; | y | | y | |
+ |-------------------------+---------+----------------------------+-------------+------------------|
+ | v, v;[3,4] | *N* | void(v), v; | y | |
+ * \endcode
+ *
+ * Notes:
+ *
+ * - [1] In Vc, mask-mask `==` and `!=` operations exist, but the result
+ * is of type `bool`, i.e. a scalar.
+ *
+ * - [2] `++` (either kind) on bools is deprecated by the standard. Our
+ * test suite does not check for it on masks, but it was supported by Vc
+ * masks at some point.
+ *
+ * - [3] Contrary to the other operators, the expected result for `(sv1,
+ * sv2)` is exactly `sv2`, no broadcasting applied.
+ *
+ * - [4] If possible, avoid the use of `operator,` unless both operands are
+ * of built-in types. Libraries had a tendency to overload
+ * `operator,` to provide for things like container initialization
+ * before C++11, and these overloads may still be present in the library
+ * you are using and replace the default meaning of `operator,`.
+ *
+ * Support levels:
+ *
+ * - `y`: operation generally works; some instances of the operation may
+ * not apply
+ *
+ * - `*N*`: operation generally does not work; some instances of the
+ * operation may not apply
+ *
+ * - `n/a`: operation does not apply (i.e. bitwise operations to
+ * floating-point operands, `--` (and in the future possibly `++`) to
+ * boolean operands, assignment operators to scalar left hand sides)
+ */
+
+ /** @name Basic interface
+ *
+ * Templates and functions in this group are directly implemented by
+ * templates and functions in namespace Overloads.
+ *
+ * @{
+ */
+
+ //! Element type of some SIMD type
+ /**
+ * \tparam V The SIMD (mask or vector) type. `const`, `volatile` or
+ * reference qualifiers are automatically ignored.
+ *
+ * Not all operations that access the element of a vector return (a
+ * reference to) the scalar type -- some may return proxy objects instead.
+ * Use `autoCopy()` to make sure you are getting a prvalue of the scalar
+ * type.
+ *
+ * Implemented by `Overloads::ScalarType`.
+ */
+ template<class V>
+ using Scalar = typename Overloads::ScalarType<std::decay_t<V> >::type;
+
+ //! Construct SIMD type with different scalar type
+ /**
+ * \tparam S The new scalar type
+ * \tparam V The SIMD (mask or vector) type.
+ *
+ * The resulting type is a SIMD vector of `S` with the same number of lanes
+ * as `V`. `const`, `volatile` or reference qualifiers in `S` and `V` are
+ * automatically ignored, and the result will have no such qualifiers.
+ *
+ * Implementations shall rebind to `LoopSIMD<S, lanes<V>()>` if they can't
+ * support a particular rebind natively.
+ *
+ * Implemented by `Overloads::RebindType`.
+ */
+ template<class S, class V>
+ using Rebind =
+ typename Overloads::RebindType<std::decay_t<S>, std::decay_t<V>>::type;
+
+ //! @} group Basic interface
+
+ /** @name Syntactic Sugar
+ *
+ * Templates and functions in this group provide syntactic sugar, they are
+ * implemented using the functionality from @ref SimdInterfaceBase, and
+ * are not customizable by implementations.
+ *
+ * @{
+ */
+
+ //! Mask type type of some SIMD type
+ /**
+ * \tparam V The SIMD (mask or vector) type. `const`, `volatile` or
+ * reference qualifiers are automatically ignored.
+ *
+ * The mask type is kind of a SIMD vector of `bool` with the same number
+ * of lanes as `V`. It results from comparison operations between values
+ * of type `V`. It is only "kind of" a SIMD vector, because the
+ * guaranteed supported operations are extremely limited. At the moment
+ * only the logical operators `&&`, `||` and `!` and the "bitwise"
+ * operators `&`, `^` and `|` between masks are supported, and even with
+ * those operators you cannot rely on automatic broadcasting of `bool`
+ * values.
+ *
+ * \note In particular, masks do not support comparison. As a workaround
+ * you can use `^` instead of `!=` and `!(m1 ^ m2)` instead of `m1
+ * == m2`. (The reason why comparison is not supported is because
+ * in Vc `==` and `!=` between masks yield a single `bool` result
+ * and not a mask.)
+ *
+ * This is an alias for `Rebind<bool, V>`.
+ */
+ template<class V>
+ using Mask = Rebind<bool, V>;
+
+ //! @} group Syntactic Sugar
+
+ /** @name Basic interface
+ * @{
+ */
+
+ //! Number of lanes in a SIMD type
+ /**
+ * \tparam V The SIMD (mask or vector) type. `const`, `volatile`
+ * or reference qualifiers are automatically ignored.
+ *
+ * Implemented by `Overloads::LaneCount`.
+ */
+ template<class V>
+ constexpr std::size_t lanes()
+ {
+ return Overloads::LaneCount<std::decay_t<V>>::value;
+ }
+
+ //! Extract an element of a SIMD type
+ /**
+ * \param l Number of lane to extract
+ * \param v SIMD object to extract from
+ *
+ * \return If `v` is a non-`const` lvalue, a reference
+ * `Scalar<decay_t<V>>&`, or a proxy object through which the
+ * element of `v` may be modified. Otherwise, `v` is a `const`
+ * lvalue or an rvalue, and the result is a prvalue (a temporary)
+ * of type `Scalar<decay_t<V>>`.
+ *
+ * Implemented by `Overloads::lane()`.
+ */
+ template<class V>
+ decltype(auto) lane(std::size_t l, V &&v)
+ {
+ assert(l < lanes<V>());
+ return lane(Overloads::ADLTag<7>{}, l, std::forward<V>(v));
+ }
+
+ //! Cast an expression from one implementation to another
+ /**
+ * Implemented by `Overloads::implCast()`
+ *
+ * Requires the scalar type and the number of lanes to match exactly.
+ *
+ * This is particularly useful for masks, which often know the type they
+ * were derived from. This can become a problem when doing a conditional
+ * operation e.g. on some floating point vector type, but with a mask
+ * derived from some index vector type.
+ *
+ * \note One of the few functions that explicitly take a template
+ * argument (`V` in this case).
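+ *
+ * A typical use, as a sketch with placeholder names: a mask obtained from
+ * comparing index vectors is converted to the mask type of a floating-point
+ * vector `V` with the same number of lanes:
+ * \code
+ * auto fpMask = Simd::implCast<Simd::Mask<V> >(idxMask);
+ * \endcode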
+ */
+ template<class V, class U>
+ constexpr V implCast(U &&u)
+ {
+ static_assert(std::is_same<Scalar<V>, Scalar<U> >::value,
+ "Scalar types must match exactly in implCast");
+ static_assert(lanes<V>() == lanes<U>(),
+ "Number of lanes must match in implCast");
+ return implCast(Overloads::ADLTag<7>{}, MetaType<std::decay_t<V> >{},
+ std::forward<U>(u));
+ }
+
+ //! Broadcast a scalar to a vector explicitly
+ /**
+ * Implemented by `Overloads::broadcast()`
+ *
+ * This is useful because the syntax for broadcasting can vary wildly
+ * between implementations.
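+ *
+ * A minimal usage sketch, assuming `V` is some SIMD vector type:
+ * \code
+ * V v = Simd::broadcast<V>(42); // every lane of v now holds 42
+ * \endcode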
+ *
+ * \note One of the few functions that explicitly take a template
+ * argument (`V` in this case).
+ */
+ template<class V, class S>
+ constexpr V broadcast(S s)
+ {
+ return broadcast(Overloads::ADLTag<7>{}, MetaType<std::decay_t<V> >{},
+ std::move(s));
+ }
+
+ //! Like the ?: operator
+ /**
+ * Equivalent to
+ * \code
+ * V result;
+ * for(std::size_t l = 0; l < lanes(mask); ++l)
+ * lane(l, result) =
+ * ( lane(l, mask) ? lane(l, ifTrue) : lane(l, ifFalse) );
+ * return result;
+ * \endcode
+ *
+ * Implemented by `Overloads::cond()`.
+ */
+ template<class M, class V>
+ V cond(M &&mask, const V &ifTrue, const V &ifFalse)
+ {
+ return cond(Overloads::ADLTag<7>{},
+ implCast<Mask<V> >(std::forward<M>(mask)), ifTrue, ifFalse);
+ }
+
+ //! Like the ?: operator
+ /**
+ * Overload for plain bool masks, accepting any simd type
+ *
+ * Implemented by `Overloads::cond()`.
+ */
+ template<class V>
+ V cond(bool mask, const V &ifTrue, const V &ifFalse)
+ {
+ return mask ? ifTrue : ifFalse;
+ }
+
+ //! The binary maximum value over two simd objects
+ /**
+ * Implemented by `Overloads::max()`.
+ */
+ template<class V>
+ auto max(const V &v1, const V &v2)
+ {
+ return max(Overloads::ADLTag<7>{}, v1, v2);
+ }
+
+ //! The binary minimum value over two simd objects
+ /**
+ * Implemented by `Overloads::min()`.
+ */
+ template<class V>
+ auto min(const V &v1, const V &v2)
+ {
+ return min(Overloads::ADLTag<7>{}, v1, v2);
+ }
+
+ //! Whether any entry is `true`
+ /**
+ * Implemented by `Overloads::anyTrue()`.
+ */
+ template<class Mask>
+ bool anyTrue(const Mask &mask)
+ {
+ return anyTrue(Overloads::ADLTag<7>{}, mask);
+ }
+
+ //! Whether all entries are `true`
+ /**
+ * Implemented by `Overloads::allTrue()`.
+ */
+ template<class Mask>
+ bool allTrue(const Mask &mask)
+ {
+ return allTrue(Overloads::ADLTag<7>{}, mask);
+ }
+
+ //! Whether any entry is `false`
+ /**
+ * Implemented by `Overloads::anyFalse()`.
+ */
+ template<class Mask>
+ bool anyFalse(const Mask &mask)
+ {
+ return anyFalse(Overloads::ADLTag<7>{}, mask);
+ }
+
+ //! Whether all entries are `false`
+ /**
+ * Implemented by `Overloads::allFalse()`.
+ */
+ template<class Mask>
+ bool allFalse(const Mask &mask)
+ {
+ return allFalse(Overloads::ADLTag<7>{}, mask);
+ }
+
+ //! The horizontal maximum value over all lanes
+ /**
+ * Implemented by `Overloads::max()`.
+ */
+ template<class V>
+ Scalar<V> max(const V &v)
+ {
+ return max(Overloads::ADLTag<7>{}, v);
+ }
+
+ //! The horizontal minimum value over all lanes
+ /**
+ * Implemented by `Overloads::min()`.
+ */
+ template<class V>
+ Scalar<V> min(const V &v)
+ {
+ return min(Overloads::ADLTag<7>{}, v);
+ }
+
+ //! Convert to mask, analogue of bool(s) for scalars
+ /**
+ * Implemented by `Overloads::mask()`.
+ */
+ template<class V>
+ auto mask(const V &v)
+ {
+ return mask(Overloads::ADLTag<7>{}, v);
+ }
+
+ //! Logic or of masks
+ /**
+ * Implemented by `Overloads::maskOr()`.
+ */
+ template<class V1, class V2>
+ auto maskOr(const V1 &v1, const V2 &v2)
+ {
+ return maskOr(Overloads::ADLTag<7>{}, v1, v2);
+ }
+
+ //! Logic and of masks
+ /**
+ * Implemented by `Overloads::maskAnd()`.
+ */
+ template<class V1, class V2>
+ auto maskAnd(const V1 &v1, const V2 &v2)
+ {
+ return maskAnd(Overloads::ADLTag<7>{}, v1, v2);
+ }
+
+ //! @} group Basic interface
+
+ /** @name Syntactic Sugar
+ *
+ * Templates and functions in this group provide syntactic sugar, they are
+ * implemented using the functionality from @ref SimdInterfaceBase, and
+ * are not customizable by implementations.
+ *
+ * @{
+ */
+
+ //! Number of lanes in a SIMD type
+ /**
+ * \tparam V The SIMD (mask or vector) type.
+ *
+ * The value of the parameter is ignored; the call is simply forwarded to
+ * `lanes<V>()`.
+ */
+ template<class V>
+ std::size_t lanes(const V &)
+ {
+ return lanes<V>();
+ }
+
+ //! @} group Syntactic Sugar
+
+ //! @} Group SIMDLib
+
+ } // namespace Simd
+} // namespace Dune
+
+#endif // DUNE_COMMON_SIMD_INTERFACE_HH
--- /dev/null
+#ifndef DUNE_COMMON_SIMD_IO_HH
+#define DUNE_COMMON_SIMD_IO_HH
+
+/** @file
+ * @brief IO interface of the SIMD abstraction
+ * @ingroup SIMDLib
+ *
+ * This file provides IO interface functions of the SIMD abstraction layer.
+ *
+ * This file is intended for direct inclusion by headers making use of the IO
+ * interface.
+ */
+
+#include <ios>
+#include <type_traits>
+
+#include <dune/common/rangeutilities.hh>
+#include <dune/common/simd/simd.hh>
+#include <dune/common/typetraits.hh>
+
+namespace Dune {
+
+ namespace SimdImpl {
+
+ template<class T>
+ class Inserter {
+ T value_;
+
+ public:
+ Inserter(const T &value) : value_(value) {}
+
+ template<class Stream,
+ class = std::enable_if_t<std::is_base_of<std::ios_base,
+ Stream>::value> >
+ friend Stream& operator<<(Stream &out, const Inserter &ins)
+ {
+ const char *sep = "<";
+ for(auto l : range(Simd::lanes(ins.value_)))
+ {
+ out << sep << autoCopy(Simd::lane(l, ins.value_));
+ sep = ", ";
+ }
+ out << '>';
+ return out;
+ }
+ };
+
+ template<class V, class = std::enable_if_t<Simd::lanes<V>() != 1> >
+ Inserter<V> io(const V &v)
+ {
+ return { v };
+ }
+
+ template<class V, class = std::enable_if_t<Simd::lanes<V>() == 1> >
+ Simd::Scalar<V> io(const V &v)
+ {
+ return Simd::lane(0, v);
+ }
+
+ }
+
+ namespace Simd {
+
+ /** @addtogroup SIMDLib
+ *
+ * @{
+ *
+ */
+
+ /** @name IO interface
+ *
+ * Templates and functions in this group provide syntactic sugar for IO.
+ * They are implemented using the functionality from @ref
+ * SimdInterfaceBase, and are not customizable by implementations.
+ *
+ * @{
+ */
+
+ //! construct a stream inserter
+ /**
+ * \tparam V The SIMD (mask or vector) type.
+ *
+ * Construct an object that can be inserted into an output stream.
+ * Insertion prints the vector values separated by a comma and a space,
+ * and surrounded by angular brackets.
+ */
+ template<class V>
+ auto vio(const V &v)
+ {
+ return SimdImpl::Inserter<V>{ v };
+ }
+
+ //! construct a stream inserter
+ /**
+ * \tparam V The SIMD (mask or vector) type.
+ *
+ * Construct an object that can be inserted into an output stream. For
+ * one-lane vectors, behaves the same as scalar insertion. For multi-lane
+ * vectors, behaves as the inserter returned by `vio()`: insertion prints
+ * the vector values separated by a comma and a space, and surrounded by
+ * angular brackets.
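+ *
+ * A small usage sketch, assuming `v` is a four-lane vector holding the
+ * values 0 to 3:
+ * \code
+ * std::cout << Simd::io(v) << std::endl; // prints <0, 1, 2, 3>
+ * \endcode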
+ */
+ template<class V>
+ auto io(const V &v)
+ {
+ return SimdImpl::io(v);
+ }
+
+ //! @} group IO interface
+
+ //! @} Group SIMDLib
+
+ } // namespace Simd
+} // namespace Dune
+
+#endif // DUNE_COMMON_SIMD_IO_HH
--- /dev/null
+#ifndef DUNE_COMMON_SIMD_LOOP_HH
+#define DUNE_COMMON_SIMD_LOOP_HH
+
+#include <array>
+#include <cassert>
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <ostream>
+
+#include <dune/common/math.hh>
+#include <dune/common/rangeutilities.hh>
+#include <dune/common/simd/simd.hh>
+#include <dune/common/typetraits.hh>
+
+namespace Dune {
+
+/*
+ * silence warnings from GCC about using integer operands on a bool
+ * (when instantiated for T=bool)
+ */
+#if __GNUC__ >= 7
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wbool-operation"
+# pragma GCC diagnostic ignored "-Wint-in-bool-context"
+#endif
+
+ /**
+ * This class specifies a vector-like type deriving from std::array
+ * for memory management and basic accessibility.
+ * This type is capable of dealing with all (well-defined) operators
+ * and is usable with the SIMD-interface.
+ *
+ * @tparam T Base type. Can itself be a vectorized type.
+ * @tparam S Size
+ * @tparam A Minimum alignment. It is inherited by rebound types.
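+ *
+ * A brief usage sketch (hypothetical values, not taken from the tests):
+ * \code
+ * Dune::LoopSIMD<double, 4> v(1.0);  // broadcast: all four lanes hold 1.0
+ * v += 2.0;                          // element-wise operations
+ * double x = Dune::Simd::lane(2, v); // lane access through the interface
+ * \endcode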
+ */
+
+ template<class T, std::size_t S, std::size_t A = 0>
+ class alignas(A==0?alignof(T):A) LoopSIMD : public std::array<T,S> {
+
+ public:
+
+ //default constructor
+ LoopSIMD() {
+ assert(reinterpret_cast<uintptr_t>(this) % std::min(alignof(LoopSIMD<T,S,A>),alignof(std::max_align_t)) == 0);
+ }
+
+ // broadcast constructor initializing the content with a given value
+ LoopSIMD(Simd::Scalar<T> i) : LoopSIMD() {
+ this->fill(i);
+ }
+
+ template<std::size_t OA>
+ explicit LoopSIMD(const LoopSIMD<T,S,OA>& other)
+ : std::array<T,S>(other)
+ {
+ assert(reinterpret_cast<uintptr_t>(this) % std::min(alignof(LoopSIMD<T,S,A>),alignof(std::max_align_t)) == 0);
+ }
+
+ /*
+ * Definition of basic operators
+ */
+
+ //Prefix operators
+#define DUNE_SIMD_LOOP_PREFIX_OP(SYMBOL) \
+ auto operator SYMBOL() { \
+ for(std::size_t i=0; i<S; i++){ \
+ SYMBOL(*this)[i]; \
+ } \
+ return *this; \
+ } \
+ static_assert(true, "expecting ;")
+
+ DUNE_SIMD_LOOP_PREFIX_OP(++);
+ DUNE_SIMD_LOOP_PREFIX_OP(--);
+#undef DUNE_SIMD_LOOP_PREFIX_OP
+
+ //Unary operators
+#define DUNE_SIMD_LOOP_UNARY_OP(SYMBOL) \
+ auto operator SYMBOL() const { \
+ LoopSIMD<T,S,A> out; \
+ for(std::size_t i=0; i<S; i++){ \
+ out[i] = SYMBOL((*this)[i]); \
+ } \
+ return out; \
+ } \
+ static_assert(true, "expecting ;")
+
+ DUNE_SIMD_LOOP_UNARY_OP(+);
+ DUNE_SIMD_LOOP_UNARY_OP(-);
+ DUNE_SIMD_LOOP_UNARY_OP(~);
+
+ auto operator!() const {
+ Simd::Mask<LoopSIMD<T,S,A>> out;
+ for(std::size_t i=0; i<S; i++){
+ out[i] = !((*this)[i]);
+ }
+ return out;
+ }
+#undef DUNE_SIMD_LOOP_UNARY_OP
+
+ //Postfix operators
+#define DUNE_SIMD_LOOP_POSTFIX_OP(SYMBOL) \
+ auto operator SYMBOL(int){ \
+ LoopSIMD<T,S,A> out = *this; \
+ SYMBOL(*this); \
+ return out; \
+ } \
+ static_assert(true, "expecting ;")
+
+ DUNE_SIMD_LOOP_POSTFIX_OP(++);
+ DUNE_SIMD_LOOP_POSTFIX_OP(--);
+#undef DUNE_SIMD_LOOP_POSTFIX_OP
+
+ //Assignment operators
+#define DUNE_SIMD_LOOP_ASSIGNMENT_OP(SYMBOL) \
+ auto operator SYMBOL(const Simd::Scalar<T> s) { \
+ for(std::size_t i=0; i<S; i++){ \
+ (*this)[i] SYMBOL s; \
+ } \
+ return *this; \
+ } \
+ \
+ auto operator SYMBOL(const LoopSIMD<T,S,A> &v) { \
+ for(std::size_t i=0; i<S; i++){ \
+ (*this)[i] SYMBOL v[i]; \
+ } \
+ return *this; \
+ } \
+ static_assert(true, "expecting ;")
+
+ DUNE_SIMD_LOOP_ASSIGNMENT_OP(+=);
+ DUNE_SIMD_LOOP_ASSIGNMENT_OP(-=);
+ DUNE_SIMD_LOOP_ASSIGNMENT_OP(*=);
+ DUNE_SIMD_LOOP_ASSIGNMENT_OP(/=);
+ DUNE_SIMD_LOOP_ASSIGNMENT_OP(%=);
+ DUNE_SIMD_LOOP_ASSIGNMENT_OP(<<=);
+ DUNE_SIMD_LOOP_ASSIGNMENT_OP(>>=);
+ DUNE_SIMD_LOOP_ASSIGNMENT_OP(&=);
+ DUNE_SIMD_LOOP_ASSIGNMENT_OP(|=);
+ DUNE_SIMD_LOOP_ASSIGNMENT_OP(^=);
+#undef DUNE_SIMD_LOOP_ASSIGNMENT_OP
+ };
+
+ //Arithmetic operators
+#define DUNE_SIMD_LOOP_BINARY_OP(SYMBOL) \
+ template<class T, std::size_t S, std::size_t A> \
+ auto operator SYMBOL(const LoopSIMD<T,S,A> &v, const Simd::Scalar<T> s) { \
+ LoopSIMD<T,S,A> out; \
+ for(std::size_t i=0; i<S; i++){ \
+ out[i] = v[i] SYMBOL s; \
+ } \
+ return out; \
+ } \
+ template<class T, std::size_t S, std::size_t A> \
+ auto operator SYMBOL(const Simd::Scalar<T> s, const LoopSIMD<T,S,A> &v) { \
+ LoopSIMD<T,S,A> out; \
+ for(std::size_t i=0; i<S; i++){ \
+ out[i] = s SYMBOL v[i]; \
+ } \
+ return out; \
+ } \
+ template<class T, std::size_t S, std::size_t A> \
+ auto operator SYMBOL(const LoopSIMD<T,S,A> &v, \
+ const LoopSIMD<T,S,A> &w) { \
+ LoopSIMD<T,S,A> out; \
+ for(std::size_t i=0; i<S; i++){ \
+ out[i] = v[i] SYMBOL w[i]; \
+ } \
+ return out; \
+ } \
+ static_assert(true, "expecting ;")
+
+ DUNE_SIMD_LOOP_BINARY_OP(+);
+ DUNE_SIMD_LOOP_BINARY_OP(-);
+ DUNE_SIMD_LOOP_BINARY_OP(*);
+ DUNE_SIMD_LOOP_BINARY_OP(/);
+ DUNE_SIMD_LOOP_BINARY_OP(%);
+
+ DUNE_SIMD_LOOP_BINARY_OP(&);
+ DUNE_SIMD_LOOP_BINARY_OP(|);
+ DUNE_SIMD_LOOP_BINARY_OP(^);
+
+#undef DUNE_SIMD_LOOP_BINARY_OP
+
+ //Bitshift operators
+#define DUNE_SIMD_LOOP_BITSHIFT_OP(SYMBOL) \
+ template<class T, std::size_t S, std::size_t A, class U> \
+ auto operator SYMBOL(const LoopSIMD<T,S,A> &v, const U s) { \
+ LoopSIMD<T,S,A> out; \
+ for(std::size_t i=0; i<S; i++){ \
+ out[i] = v[i] SYMBOL s; \
+ } \
+ return out; \
+ } \
+ template<class T, std::size_t S, std::size_t A, class U, std::size_t AU> \
+ auto operator SYMBOL(const LoopSIMD<T,S,A> &v, \
+ const LoopSIMD<U,S,AU> &w) { \
+ LoopSIMD<T,S,A> out; \
+ for(std::size_t i=0; i<S; i++){ \
+ out[i] = v[i] SYMBOL w[i]; \
+ } \
+ return out; \
+ } \
+ static_assert(true, "expecting ;")
+
+ DUNE_SIMD_LOOP_BITSHIFT_OP(<<);
+ DUNE_SIMD_LOOP_BITSHIFT_OP(>>);
+
+#undef DUNE_SIMD_LOOP_BITSHIFT_OP
+
+ //Comparison operators
+#define DUNE_SIMD_LOOP_COMPARISON_OP(SYMBOL) \
+ template<class T, std::size_t S, std::size_t A, class U> \
+ auto operator SYMBOL(const LoopSIMD<T,S,A> &v, const U s) { \
+ Simd::Mask<LoopSIMD<T,S,A>> out; \
+ for(std::size_t i=0; i<S; i++){ \
+ out[i] = v[i] SYMBOL s; \
+ } \
+ return out; \
+ } \
+ template<class T, std::size_t S, std::size_t A> \
+ auto operator SYMBOL(const Simd::Scalar<T> s, const LoopSIMD<T,S,A> &v) { \
+ Simd::Mask<LoopSIMD<T,S,A>> out; \
+ for(std::size_t i=0; i<S; i++){ \
+ out[i] = s SYMBOL v[i]; \
+ } \
+ return out; \
+ } \
+ template<class T, std::size_t S, std::size_t A> \
+ auto operator SYMBOL(const LoopSIMD<T,S,A> &v, \
+ const LoopSIMD<T,S,A> &w) { \
+ Simd::Mask<LoopSIMD<T,S,A>> out; \
+ for(std::size_t i=0; i<S; i++){ \
+ out[i] = v[i] SYMBOL w[i]; \
+ } \
+ return out; \
+ } \
+ static_assert(true, "expecting ;")
+
+ DUNE_SIMD_LOOP_COMPARISON_OP(<);
+ DUNE_SIMD_LOOP_COMPARISON_OP(>);
+ DUNE_SIMD_LOOP_COMPARISON_OP(<=);
+ DUNE_SIMD_LOOP_COMPARISON_OP(>=);
+ DUNE_SIMD_LOOP_COMPARISON_OP(==);
+ DUNE_SIMD_LOOP_COMPARISON_OP(!=);
+#undef DUNE_SIMD_LOOP_COMPARISON_OP
+
+ //Boolean operators
+#define DUNE_SIMD_LOOP_BOOLEAN_OP(SYMBOL) \
+ template<class T, std::size_t S, std::size_t A> \
+ auto operator SYMBOL(const LoopSIMD<T,S,A> &v, const Simd::Scalar<T> s) { \
+ Simd::Mask<LoopSIMD<T,S,A>> out; \
+ for(std::size_t i=0; i<S; i++){ \
+ out[i] = v[i] SYMBOL s; \
+ } \
+ return out; \
+ } \
+ template<class T, std::size_t S, std::size_t A> \
+ auto operator SYMBOL(const Simd::Mask<T> s, const LoopSIMD<T,S,A> &v) { \
+ Simd::Mask<LoopSIMD<T,S,A>> out; \
+ for(std::size_t i=0; i<S; i++){ \
+ out[i] = s SYMBOL v[i]; \
+ } \
+ return out; \
+ } \
+ template<class T, std::size_t S, std::size_t A> \
+ auto operator SYMBOL(const LoopSIMD<T,S,A> &v, \
+ const LoopSIMD<T,S,A> &w) { \
+ Simd::Mask<LoopSIMD<T,S,A>> out; \
+ for(std::size_t i=0; i<S; i++){ \
+ out[i] = v[i] SYMBOL w[i]; \
+ } \
+ return out; \
+ } \
+ static_assert(true, "expecting ;")
+
+ DUNE_SIMD_LOOP_BOOLEAN_OP(&&);
+ DUNE_SIMD_LOOP_BOOLEAN_OP(||);
+#undef DUNE_SIMD_LOOP_BOOLEAN_OP
+
+ //prints a given LoopSIMD
+ template<class T, std::size_t S, std::size_t A>
+ std::ostream& operator<< (std::ostream &os, const LoopSIMD<T,S,A> &v) {
+ os << "[";
+ for(std::size_t i=0; i<S-1; i++) {
+ os << v[i] << ", ";
+ }
+ os << v[S-1] << "]";
+ return os;
+ }
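+  // e.g. a LoopSIMD<double, 3> holding {1, 2, 3} prints as "[1, 2, 3]"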
+
+ namespace Simd {
+ namespace Overloads {
+ /*
+ * Implementation/Overloads of the functions needed for
+ * SIMD-interface-compatibility
+ */
+
+ //Implementation of SIMD-interface-types
+ template<class T, std::size_t S, std::size_t A>
+ struct ScalarType<LoopSIMD<T,S,A>> {
+ using type = Simd::Scalar<T>;
+ };
+
+ template<class U, class T, std::size_t S, std::size_t A>
+ struct RebindType<U, LoopSIMD<T,S,A>> {
+ using type = LoopSIMD<Simd::Rebind<U, T>,S,A>;
+ };
+
+ //Implementation of SIMD-interface-functionality
+ template<class T, std::size_t S, std::size_t A>
+ struct LaneCount<LoopSIMD<T,S,A>> : index_constant<S*lanes<T>()> {};
+
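+      // The overall lane index l of a LoopSIMD maps to lane l%lanes<T>() of
+      // entry l/lanes<T>(), as implemented by the lane() overloads below.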
+ template<class T, std::size_t S, std::size_t A>
+ auto lane(ADLTag<5>, std::size_t l, LoopSIMD<T,S,A> &&v)
+ -> decltype(std::move(Simd::lane(l%lanes<T>(), v[l/lanes<T>()])))
+ {
+ return std::move(Simd::lane(l%lanes<T>(), v[l/lanes<T>()]));
+ }
+
+ template<class T, std::size_t S, std::size_t A>
+ auto lane(ADLTag<5>, std::size_t l, const LoopSIMD<T,S,A> &v)
+ -> decltype(Simd::lane(l%lanes<T>(), v[l/lanes<T>()]))
+ {
+ return Simd::lane(l%lanes<T>(), v[l/lanes<T>()]);
+ }
+
+ template<class T, std::size_t S, std::size_t A>
+ auto lane(ADLTag<5>, std::size_t l, LoopSIMD<T,S,A> &v)
+ -> decltype(Simd::lane(l%lanes<T>(), v[l/lanes<T>()]))
+ {
+ return Simd::lane(l%lanes<T>(), v[l/lanes<T>()]);
+ }
+
+ template<class T, std::size_t S, std::size_t AM, std::size_t AD>
+ auto cond(ADLTag<5>, Simd::Mask<LoopSIMD<T,S,AM>> mask,
+ LoopSIMD<T,S,AD> ifTrue, LoopSIMD<T,S,AD> ifFalse) {
+ LoopSIMD<T,S,AD> out;
+ for(std::size_t i=0; i<S; i++) {
+ out[i] = Simd::cond(mask[i], ifTrue[i], ifFalse[i]);
+ }
+ return out;
+ }
+
+ template<class M, class T, std::size_t S, std::size_t A>
+ auto cond(ADLTag<5, std::is_same<bool, Simd::Scalar<M> >::value
+ && Simd::lanes<M>() == Simd::lanes<LoopSIMD<T,S,A> >()>,
+ M mask, LoopSIMD<T,S,A> ifTrue, LoopSIMD<T,S,A> ifFalse)
+ {
+ LoopSIMD<T,S,A> out;
+ for(auto l : range(Simd::lanes(mask)))
+ Simd::lane(l, out) = Simd::lane(l, mask) ? Simd::lane(l, ifTrue) : Simd::lane(l, ifFalse);
+ return out;
+ }
+
+ template<class M, std::size_t S, std::size_t A>
+ bool anyTrue(ADLTag<5>, LoopSIMD<M,S,A> mask) {
+ bool out = false;
+ for(std::size_t i=0; i<S; i++) {
+ out |= Simd::anyTrue(mask[i]);
+ }
+ return out;
+ }
+
+ template<class M, std::size_t S, std::size_t A>
+ bool allTrue(ADLTag<5>, LoopSIMD<M,S,A> mask) {
+ bool out = true;
+ for(std::size_t i=0; i<S; i++) {
+ out &= Simd::allTrue(mask[i]);
+ }
+ return out;
+ }
+
+ template<class M, std::size_t S, std::size_t A>
+ bool anyFalse(ADLTag<5>, LoopSIMD<M,S,A> mask) {
+ bool out = false;
+ for(std::size_t i=0; i<S; i++) {
+ out |= Simd::anyFalse(mask[i]);
+ }
+ return out;
+ }
+
+ template<class M, std::size_t S, std::size_t A>
+ bool allFalse(ADLTag<5>, LoopSIMD<M,S,A> mask) {
+ bool out = true;
+ for(std::size_t i=0; i<S; i++) {
+ out &= Simd::allFalse(mask[i]);
+ }
+ return out;
+ }
+ } //namespace Overloads
+
+ } //namespace Simd
+
+
+ /*
+   *  Overloads of the unary cmath operations. Operations taking more than
+   *  one argument or returning more than one value are not supported.
+   *  Due to inconsistencies in their return values, cmath operations on
+   *  integral types are not supported either.
+ */
+
+#define DUNE_SIMD_LOOP_CMATH_UNARY_OP(expr) \
+ template<class T, std::size_t S, std::size_t A, typename Sfinae = \
+ typename std::enable_if_t<!std::is_integral<Simd::Scalar<T>>::value> > \
+ auto expr(const LoopSIMD<T,S,A> &v) { \
+ using std::expr; \
+ LoopSIMD<T,S,A> out; \
+ for(std::size_t i=0; i<S; i++) { \
+ out[i] = expr(v[i]); \
+ } \
+ return out; \
+ } \
+ static_assert(true, "expecting ;")
+
+#define DUNE_SIMD_LOOP_CMATH_UNARY_OP_WITH_RETURN(expr, returnType) \
+ template<class T, std::size_t S, std::size_t A, typename Sfinae = \
+ typename std::enable_if_t<!std::is_integral<Simd::Scalar<T>>::value> > \
+ auto expr(const LoopSIMD<T,S,A> &v) { \
+ using std::expr; \
+ LoopSIMD<returnType,S> out; \
+ for(std::size_t i=0; i<S; i++) { \
+ out[i] = expr(v[i]); \
+ } \
+ return out; \
+ } \
+ static_assert(true, "expecting ;")
+
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(cos);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(sin);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(tan);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(acos);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(asin);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(atan);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(cosh);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(sinh);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(tanh);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(acosh);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(asinh);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(atanh);
+
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(exp);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(log);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(log10);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(exp2);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(expm1);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP_WITH_RETURN(ilogb, int);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(log1p);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(log2);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(logb);
+
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(sqrt);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(cbrt);
+
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(erf);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(erfc);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(tgamma);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(lgamma);
+
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(ceil);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(floor);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(trunc);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(round);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP_WITH_RETURN(lround, long);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP_WITH_RETURN(llround, long long);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(rint);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP_WITH_RETURN(lrint, long);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP_WITH_RETURN(llrint, long long);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(nearbyint);
+
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(fabs);
+ DUNE_SIMD_LOOP_CMATH_UNARY_OP(abs);
+
+#undef DUNE_SIMD_LOOP_CMATH_UNARY_OP
+#undef DUNE_SIMD_LOOP_CMATH_UNARY_OP_WITH_RETURN
+
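+  // Illustration (not part of the documented interface): for a hypothetical
+  // LoopSIMD<double, 4> v, sin(v) applies std::sin entry-wise and returns
+  // another LoopSIMD<double, 4>, while ilogb(v) returns a LoopSIMD<int, 4>.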
+
+  /* cmath functions not implemented:
+   * atan2
+   * frexp, ldexp
+ * modf
+ * scalbn, scalbln
+ * pow
+ * hypot
+ * remainder, remquo
+ * copysign
+ * nan
+ * nextafter, nexttoward
+ * fdim, fmax, fmin
+ */
+
+ /*
+   * Overloads of specific functions usually provided by the std library.
+ * More overloads will be provided should the need arise.
+ */
+
+#define DUNE_SIMD_LOOP_STD_UNARY_OP(expr) \
+ template<class T, std::size_t S, std::size_t A> \
+ auto expr(const LoopSIMD<T,S,A> &v) { \
+ using std::expr; \
+ LoopSIMD<T,S,A> out; \
+ for(std::size_t i=0; i<S; i++) { \
+ out[i] = expr(v[i]); \
+ } \
+ return out; \
+ } \
+ \
+ template<class T, std::size_t S, std::size_t A> \
+ auto expr(const LoopSIMD<std::complex<T>,S,A> &v) { \
+ using std::expr; \
+ LoopSIMD<T,S,A> out; \
+ for(std::size_t i=0; i<S; i++) { \
+ out[i] = expr(v[i]); \
+ } \
+ return out; \
+ } \
+ static_assert(true, "expecting ;")
+
+ DUNE_SIMD_LOOP_STD_UNARY_OP(real);
+ DUNE_SIMD_LOOP_STD_UNARY_OP(imag);
+
+#undef DUNE_SIMD_LOOP_STD_UNARY_OP
+
+#define DUNE_SIMD_LOOP_STD_BINARY_OP(expr) \
+ template<class T, std::size_t S, std::size_t A> \
+ auto expr(const LoopSIMD<T,S,A> &v, const LoopSIMD<T,S,A> &w) { \
+ using std::expr; \
+ LoopSIMD<T,S,A> out; \
+ for(std::size_t i=0; i<S; i++) { \
+ out[i] = expr(v[i],w[i]); \
+ } \
+ return out; \
+ } \
+ static_assert(true, "expecting ;")
+
+ DUNE_SIMD_LOOP_STD_BINARY_OP(max);
+ DUNE_SIMD_LOOP_STD_BINARY_OP(min);
+
+#undef DUNE_SIMD_LOOP_STD_BINARY_OP
+
+ namespace MathOverloads {
+ template<class T, std::size_t S, std::size_t A>
+ auto isNaN(const LoopSIMD<T,S,A> &v, PriorityTag<3>, ADLTag) {
+ Simd::Mask<LoopSIMD<T,S,A>> out;
+ for(auto l : range(S))
+ out[l] = Dune::isNaN(v[l]);
+ return out;
+ }
+
+ template<class T, std::size_t S, std::size_t A>
+ auto isInf(const LoopSIMD<T,S,A> &v, PriorityTag<3>, ADLTag) {
+ Simd::Mask<LoopSIMD<T,S,A>> out;
+ for(auto l : range(S))
+ out[l] = Dune::isInf(v[l]);
+ return out;
+ }
+
+ template<class T, std::size_t S, std::size_t A>
+ auto isFinite(const LoopSIMD<T,S,A> &v, PriorityTag<3>, ADLTag) {
+ Simd::Mask<LoopSIMD<T,S,A>> out;
+ for(auto l : range(S))
+ out[l] = Dune::isFinite(v[l]);
+ return out;
+ }
+  } //namespace MathOverloads
+
+ template<class T, std::size_t S, std::size_t A>
+ struct IsNumber<LoopSIMD<T,S,A>> :
+ public std::integral_constant<bool, IsNumber<T>::value>{
+ };
+
+#if __GNUC__ >= 7
+# pragma GCC diagnostic pop
+#endif
+
+} //namespace Dune
+
+#endif
--- /dev/null
+#ifndef DUNE_COMMON_SIMD_SIMD_HH
+#define DUNE_COMMON_SIMD_SIMD_HH
+
+/** @file
+ * @brief Include file for users of the SIMD abstraction layer
+ *
+ * Include this file if you want to be able to handle SIMD types -- do not
+ * include the internal headers directly.
+ */
+
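+// A minimal usage sketch (illustrative only; sumLanes is a hypothetical
+// helper, and V stands for any supported scalar or SIMD type):
+//
+//   template<class V>
+//   Dune::Simd::Scalar<V> sumLanes(const V &v)
+//   {
+//     Dune::Simd::Scalar<V> sum(0);
+//     for(std::size_t l = 0; l < Dune::Simd::lanes(v); ++l)
+//       sum += Dune::Simd::lane(l, v);
+//     return sum;
+//   }
+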
+#include <dune/common/simd/interface.hh>
+#include <dune/common/simd/standard.hh>
+
+#endif // DUNE_COMMON_SIMD_SIMD_HH
--- /dev/null
+#ifndef DUNE_COMMON_SIMD_STANDARD_HH
+#define DUNE_COMMON_SIMD_STANDARD_HH
+
+/** @file
+ * @ingroup SIMDStandard
+ * @brief SIMD abstractions for the standard built-in types
+ *
+ * This file should not normally be included by users of the SIMD abstraction
+ * (i.e. other Dune headers). Nor does it need to be included by translation
+ * units that pass built-in types to Dune headers which in turn support SIMD
+ * types through the SIMD abstraction. Dune functionality always supports
+ * built-in types: either it does not support SIMD types at all and thus
+ * supports only built-in types, or, if it does support SIMD types, it must
+ * include `<dune/common/simd/simd.hh>`, which in turn includes this header.
+ */
+
+#include <cstddef>
+#include <type_traits>
+#include <utility>
+
+#include <dune/common/indices.hh>
+#include <dune/common/simd/base.hh>
+#include <dune/common/simd/defaults.hh>
+
+/** @defgroup SIMDStandard SIMD Abstraction Implementation for standard types
+ * @ingroup SIMDApp
+ *
+ * This implements the vectorization interface for scalar types. It applies
+ * to any type that does not have a specialized interface implementation.
+ *
+ * As an application developer, you do not need to do anything special to get
+ * support for standard types in the vectorization abstraction. If the dune
+ * classes you are using provide support for vectorization, they will include
+ * `<dune/common/simd/simd.hh>`, which will pull in the abstraction for
+ * standard types automatically. You simply need to make sure that the types
+ * themselves are supported:
+ * - for built-in types there is nothing you need to do,
+ * - for `std::complex`, you need to `#include <complex>`
+ * - etc.
+ */
+
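+// For illustration (a consequence of the overloads below): for a plain
+// double x, Dune::Simd::lanes(x) == 1, Dune::Simd::lane(0, x) simply
+// accesses x, and the mask type Dune::Simd::Mask<double> is just bool.
+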
+namespace Dune {
+ namespace Simd {
+
+ namespace Overloads {
+
+ /** @name Specialized classes and overloaded functions
+ * @ingroup SIMDStandard
+ * @{
+ */
+
+ //! should have a member type \c type
+ /**
+ * Implements Simd::Scalar
+ */
+ template<class V, class>
+ struct ScalarType { using type = V; };
+
+ //! should have a member type \c type
+ /**
+ * Implements Simd::Rebind
+ */
+ template<class S, class, class>
+ struct RebindType { using type = S; };
+
+      //! should be derived from a Dune::index_constant
+ /**
+ * Implements Simd::lanes()
+ */
+ template<class, class>
+ struct LaneCount : public index_constant<1> { };
+
+ //! implements Simd::lane()
+ /**
+ * This binds to rvalues and const lvalues. It would bind to non-const
+ * lvalues too, but those are caught by the overload with ADLTag<3>.
+ * Using a universal reference here would bind to any argument with a
+ * perfect match. This would mean ambiguous overloads with other
+ * abstractions, if those only declare overloads for `const TheirType &`
+       * and `TheirType &`, because universal references match
+ * perfectly.
+ */
+ template<class V>
+ V lane(ADLTag<2>, std::size_t, V v)
+ {
+ return v;
+ }
+
+ template<class V>
+ V &lane(ADLTag<3>, std::size_t, V &v)
+ {
+ return v;
+ }
+
+ // No Simd::cond() implementation, the overload for bool masks in the
+ // interface is sufficient
+
+ //! implements Simd::anyTrue()
+ inline bool anyTrue(ADLTag<2>, bool mask) { return mask; }
+
+ //! implements Simd::allTrue()
+ inline bool allTrue(ADLTag<2>, bool mask) { return mask; }
+
+ //! implements Simd::anyFalse()
+ inline bool anyFalse(ADLTag<2>, bool mask) { return !mask; }
+
+ //! implements Simd::allFalse()
+ inline bool allFalse(ADLTag<2>, bool mask) { return !mask; }
+
+ //! @} group SIMDStandard
+
+ } // namespace Overloads
+ } // namespace Simd
+} // namespace Dune
+
+#endif // DUNE_COMMON_SIMD_STANDARD_HH
--- /dev/null
+#include <config.h>
+
+#include <ostream>
+#include <string>
+
+#include <dune/common/simd/test.hh>
+
+void Dune::Simd::UnitTest::complain(const char *file, int line,
+ const char *func, const char *expr)
+{
+ log_ << file << ":" << line << ": In " << func << ": Error: check (" << expr
+ << ") failed" << std::endl;
+ good_ = false;
+}
+
+void Dune::Simd::UnitTest::
+complain(const char *file, int line, const char *func,
+ const std::string &opname, const char *expr)
+{
+ log_ << file << ":" << line << ": In " << func << ", while testing "
+ << opname << ": Error: check (" << expr << ") failed" << std::endl;
+ good_ = false;
+}
--- /dev/null
+#ifndef DUNE_COMMON_SIMD_TEST_HH
+#define DUNE_COMMON_SIMD_TEST_HH
+
+/** @file
+ * @brief Common tests for simd abstraction implementations
+ *
+ * This file is an interface header and may be included without restrictions.
+ */
+
+#include <algorithm>
+#include <cstddef>
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <type_traits>
+#include <typeindex>
+#include <typeinfo>
+#include <unordered_set>
+#include <utility>
+
+#include <dune/common/classname.hh>
+#include <dune/common/hybridutilities.hh>
+#include <dune/common/rangeutilities.hh>
+#include <dune/common/simd/io.hh>
+#include <dune/common/simd/loop.hh>
+#include <dune/common/simd/simd.hh>
+#include <dune/common/std/type_traits.hh>
+#include <dune/common/typelist.hh>
+#include <dune/common/typetraits.hh>
+
+namespace Dune {
+ namespace Simd {
+
+ namespace Impl {
+
+ template<class Expr, class SFINAE = void>
+ struct CanCall; // not defined unless Expr has the form Op(Args...)
+ template<class Op, class... Args, class SFINAE>
+ struct CanCall<Op(Args...), SFINAE> : std::false_type {};
+ template<class Op, class... Args>
+ struct CanCall<Op(Args...), std::void_t<std::result_of_t<Op(Args...)> > >
+ : std::true_type
+ {};
+
+ template<class T, class SFINAE = void>
+ struct LessThenComparable : std::false_type {};
+ template<class T>
+ struct LessThenComparable<T, std::void_t<decltype(std::declval<T>()
+ < std::declval<T>())> > :
+ std::true_type
+ {};
+
+ template<class Dst, class Src>
+ struct CopyConstHelper
+ {
+ using type = Dst;
+ };
+ template<class Dst, class Src>
+ struct CopyConstHelper<Dst, const Src>
+ {
+ using type = std::add_const_t<Dst>;
+ };
+
+ template<class Dst, class Src>
+ struct CopyVolatileHelper
+ {
+ using type = Dst;
+ };
+ template<class Dst, class Src>
+ struct CopyVolatileHelper<Dst, volatile Src>
+ {
+ using type = std::add_volatile_t<Dst>;
+ };
+
+ template<class Dst, class Src>
+ struct CopyReferenceHelper
+ {
+ using type = Dst;
+ };
+ template<class Dst, class Src>
+ struct CopyReferenceHelper<Dst, Src&>
+ {
+ using type = std::add_lvalue_reference_t<Dst>;
+ };
+
+ template<class Dst, class Src>
+ struct CopyReferenceHelper<Dst, Src&&>
+ {
+ using type = std::add_rvalue_reference_t<Dst>;
+ };
+
+ template<class Dst, class Src>
+ using CopyRefQual = typename CopyReferenceHelper<
+ typename CopyVolatileHelper<
+ typename CopyConstHelper<
+ std::decay_t<Dst>,
+ std::remove_reference_t<Src>
+ >::type,
+ std::remove_reference_t<Src>
+ >::type,
+ Src
+ >::type;
+
+ template<class Mark, class Types,
+ class Indices =
+ std::make_index_sequence<TypeListSize<Types>::value - 1> >
+ struct RemoveEnd;
+ template<class Mark, class Types, std::size_t... I>
+ struct RemoveEnd<Mark, Types, std::index_sequence<I...>>
+ {
+ using Back = TypeListEntry_t<TypeListSize<Types>::value - 1, Types>;
+ static_assert(std::is_same<Mark, Back>::value,
+ "TypeList not terminated by proper EndMark");
+ using type = TypeList<TypeListEntry_t<I, Types>...>;
+ };
+
+ template<class T, class List, class = void>
+ struct TypeInList;
+
+ template<class T>
+ struct TypeInList<T, TypeList<> > : std::false_type {};
+
+ template<class T, class... Rest>
+ struct TypeInList<T, TypeList<T, Rest...> > : std::true_type {};
+
+ template<class T, class Head, class... Rest>
+ struct TypeInList<T, TypeList<Head, Rest...>,
+ std::enable_if_t<!std::is_same<T, Head>::value> > :
+ TypeInList<T, TypeList<Rest...> >::type
+ {};
+
+ template<class T>
+ struct IsLoop : std::false_type {};
+ template<class T, std::size_t S>
+ struct IsLoop<LoopSIMD<T, S> > : std::true_type {};
+
+ // used inside static_assert to trick the compiler into printing a list
+ // of types:
+ //
+ // static_assert(debugTypes<V>(Std::bool_constant<condition>{}), "msg");
+ //
+ // Should include what the type `V` expands to in the error message.
+ template<class...>
+ constexpr bool debugTypes(std::true_type) { return true; }
+ template<class... Types>
+ [[deprecated]]
+ constexpr bool debugTypes(std::false_type) { return false; }
+
+ } // namespace Impl
+
+ //! final element marker for `RebindList`
+ struct EndMark {};
+ //! A list of types with the final element removed
+ /**
+     * This is `TypeList<NoEndTypes...>`, where `NoEndTypes...` is `Types...`
+ * with the final element removed. The final element in `Types...` is
+ * required to be `EndMark`.
+ *
+ * This is useful to construct type lists in generated source files, since
+ * you don't need to avoid generating a trailing `,` in the list -- just
+ * terminate it with `EndMark`.
+ */
+ template<class... Types>
+ using RebindList =
+ typename Impl::RemoveEnd<EndMark, TypeList<Types...> >::type;
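+    // For example (illustrative): RebindList<int, bool, EndMark> is
+    // TypeList<int, bool>.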
+
+ //! check whether a type is an instance of LoopSIMD
+ template<class T>
+ using IsLoop = typename Impl::IsLoop<T>::type;
+
+ class UnitTest {
+ bool good_ = true;
+ std::ostream &log_ = std::cerr;
+ // records the types for which checks have started running to avoid
+ // infinite recursion
+ std::unordered_set<std::type_index> seen_;
+
+ ////////////////////////////////////////////////////////////////////////
+ //
+ // Helper functions
+ //
+
+ void complain(const char *file, int line, const char *func,
+ const char *expr);
+
+ void complain(const char *file, int line, const char *func,
+ const std::string &opname, const char *expr);
+
+      // This macro is defined only within this file; do not use it anywhere
+      // else. Doing the actual printing in an external function dramatically
+ // reduces memory use during compilation. Defined in such a way that
+ // the call will only happen for failed checks.
+#define DUNE_SIMD_CHECK(expr) \
+ ((expr) ? void() : complain(__FILE__, __LINE__, __func__, #expr))
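+      // Typical use (illustrative): DUNE_SIMD_CHECK(lanes(vec) == lanes<V>());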
+
+ // the function using this macro must define a way to compute the
+ // operator name in DUNE_SIMD_OPNAME
+#define DUNE_SIMD_CHECK_OP(expr) \
+ ((expr) ? void() : complain(__FILE__, __LINE__, __func__, \
+ DUNE_SIMD_OPNAME, #expr))
+
+ // "cast" into a prvalue
+ template<class T>
+ static std::decay_t<T> prvalue(T &&t)
+ {
+ return std::forward<T>(t);
+ }
+
+ // whether the vector is 42 in all lanes
+ template<class V>
+ static bool is42(const V &v)
+ {
+ bool good = true;
+
+ for(std::size_t l = 0; l < lanes(v); ++l)
+ // need to cast in case we have a mask type
+ good &= (lane(l, v) == Scalar<V>(42));
+
+ return good;
+ }
+
+ // make a vector that contains the sequence { 1, 2, ... }
+ template<class V>
+ static V make123()
+ {
+ // initialize to avoid undefined behaviour if assigning to lane()
+ // involves lvalue-to-rvalue conversions, e.g. due to bitmask
+ // operations. Avoid using broadcast<V>() for initialization to avoid
+ // test interdependencies.
+ V vec(Scalar<V>(0));
+ for(std::size_t l = 0; l < lanes(vec); ++l)
+ lane(l, vec) = l + 1;
+ return vec;
+ }
+
+ // whether the vector contains the sequence { 1, 2, ... }
+ template<class V>
+ static bool is123(const V &v)
+ {
+ bool good = true;
+
+ for(std::size_t l = 0; l < lanes(v); ++l)
+ // need to cast in case we have a mask type
+ good &= (lane(l, v) == Scalar<V>(l+1));
+
+ return good;
+ }
+
+ template<class V>
+ static V leftVector()
+ {
+ // Avoid using broadcast<V>() for initialization to avoid test
+ // interdependencies.
+ V res(Scalar<V>(0));
+ for(std::size_t l = 0; l < lanes(res); ++l)
+ lane(l, res) = Scalar<V>(l+1);
+ return res;
+ }
+
+ template<class V>
+ static V rightVector()
+ {
+ // Avoid using broadcast<V>() for initialization to avoid test
+ // interdependencies.
+ V res(Scalar<V>(0));
+ for(std::size_t l = 0; l < lanes(res); ++l)
+ // do not exceed number of bits in char (for shifts)
+ // avoid 0 (for / and %)
+ lane(l, res) = Scalar<V>((l)%7+1);
+ return res;
+ }
+
+ template<class T>
+ static T leftScalar()
+ {
+ return T(42);
+ }
+
+ template<class T>
+ static T rightScalar()
+ {
+ // do not exceed number of bits in char (for shifts)
+ // avoid 0 (for / and %)
+ return T(5);
+ }
+
+ template<class Call>
+ using CanCall = Impl::CanCall<Call>;
+
+ template<class Dst, class Src>
+ using CopyRefQual = Impl::CopyRefQual<Dst, Src>;
+
+ // test whether the Op supports the operation on scalars. We do not use
+ // `lane()` to obtain the scalars, because that might return a proxy
+ // object, and we are interested in what exactly the scalar type can do,
+      // not a proxy that might have more overloads than needed. In addition,
+ // `lane()` may not preserve `const` and reference qualifiers.
+ template<class Op, class... Vectors>
+ using ScalarResult =
+ decltype(std::declval<Op>().
+ scalar(std::declval<CopyRefQual<Scalar<Vectors>,
+ Vectors> >()...));
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Check associated types
+ //
+
+ template<class V>
+ void checkScalar()
+ {
+ // check that the type Scalar<V> exists
+ using T = Scalar<V>;
+
+ static_assert(std::is_same<T, std::decay_t<T> >::value, "Scalar types "
+ "must not be references, and must not include "
+ "cv-qualifiers");
+ [[maybe_unused]] T a{};
+ }
+
+ template<class V>
+ [[deprecated("Warning: please include bool in the Rebinds for "
+ "simd type V, as Masks are not checked otherwise.")]]
+ void warnMissingMaskRebind(std::true_type) {}
+ template<class V>
+ void warnMissingMaskRebind(std::false_type) {}
+
+ template<class V, class Rebinds, template<class> class RebindPrune,
+ template<class> class RebindAccept, class Recurse>
+ void checkRebindOf(Recurse recurse)
+ {
+ Hybrid::forEach(Rebinds{}, [this,recurse](auto target) {
+ using T = typename decltype(target)::type;
+
+ // check that the rebound type exists
+ using W = Rebind<T, V>;
+ log_ << "Type " << className<V>() << " rebound to "
+ << className<T>() << " is " << className<W>() << std::endl;
+
+ static_assert(std::is_same<W, std::decay_t<W> >::value, "Rebound "
+ "types must not be references, and must not include "
+ "cv-qualifiers");
+ static_assert(lanes<V>() == lanes<W>(), "Rebound types must have "
+ "the same number of lanes as the original vector "
+ "types");
+ static_assert(std::is_same<T, Scalar<W> >::value, "Rebound types "
+ "must have the bound-to scalar type");
+
+ if constexpr (RebindPrune<W>{}) {
+ log_ << "Pruning check of Simd type " << className<W>()
+ << std::endl;
+ }
+ else {
+ using Impl::debugTypes;
+ static_assert(debugTypes<T, V, W>(RebindAccept<W>{}),
+ "Rebind<T, V> is W, but that is not accepted "
+ "by RebindAccept");
+ recurse(MetaType<W>{});
+ }
+ });
+
+ static_assert(std::is_same<Rebind<Scalar<V>, V>, V>::value, "A type "
+ "rebound to its own scalar type must be the same type "
+ "as the original type");
+ static_assert(std::is_same<Rebind<bool, V>, Mask<V> >::value, "A type "
+ "rebound to bool must be the mask type for that type");
+
+ constexpr bool hasBool = Impl::TypeInList<bool, Rebinds>::value;
+ warnMissingMaskRebind<V>(Std::bool_constant<!hasBool>{});
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Fundamental checks
+ //
+
+ template<class V>
+ void checkLanes()
+ {
+ // check lanes
+ static_assert(std::is_same<std::size_t, decltype(lanes<V>())>::value,
+ "return type of lanes<V>() should be std::size_t");
+ static_assert(std::is_same<std::size_t, decltype(lanes(V{}))>::value,
+ "return type of lanes(V{}) should be std::size_t");
+
+ // the result of lanes<V>() must be constexpr
+ [[maybe_unused]] constexpr auto size = lanes<V>();
+ // but the result of lanes(vec) does not need to be constexpr
+ DUNE_SIMD_CHECK(lanes<V>() == lanes(V{}));
+ }
+
+ template<class V>
+ void checkDefaultConstruct()
+ {
+ { [[maybe_unused]] V vec; }
+ { [[maybe_unused]] V vec{}; }
+ { [[maybe_unused]] V vec = {}; }
+ }
+
+ template<class V>
+ void checkLane()
+ {
+ // Avoid using broadcast<V>() for initialization to avoid test
+ // interdependencies.
+ V vec(Scalar<V>(0));
+ // check lane() on mutable lvalues
+ for(std::size_t l = 0; l < lanes(vec); ++l)
+ lane(l, vec) = l + 1;
+ for(std::size_t l = 0; l < lanes(vec); ++l)
+ DUNE_SIMD_CHECK(lane(l, vec) == Scalar<V>(l + 1));
+ using MLRes = decltype(lane(0, vec));
+ static_assert(std::is_same<MLRes, Scalar<V>&>::value ||
+ std::is_same<MLRes, std::decay_t<MLRes> >::value,
+ "Result of lane() on a mutable lvalue vector must "
+ "either be a mutable reference to a scalar of that "
+ "vector or a proxy object (which itself may not be a "
+ "reference nor const).");
+
+ // check lane() on const lvalues
+ const V &vec2 = vec;
+ for(std::size_t l = 0; l < lanes(vec); ++l)
+ DUNE_SIMD_CHECK(lane(l, vec2) == Scalar<V>(l + 1));
+ using CLRes = decltype(lane(0, vec2));
+ static_assert(std::is_same<CLRes, const Scalar<V>&>::value ||
+ std::is_same<CLRes, std::decay_t<CLRes> >::value,
+ "Result of lane() on a const lvalue vector must "
+ "either be a const lvalue reference to a scalar of that "
+ "vector or a proxy object (which itself may not be a "
+ "reference nor const).");
+ static_assert(!std::is_assignable<CLRes, Scalar<V> >::value,
+ "Result of lane() on a const lvalue vector must not be "
+ "assignable from a scalar.");
+
+ // check lane() on rvalues
+ for(std::size_t l = 0; l < lanes(vec); ++l)
+ DUNE_SIMD_CHECK(lane(l, prvalue(vec)) == Scalar<V>(l + 1));
+ using RRes = decltype(lane(0, prvalue(vec)));
+ // TODO: do we really want to allow Scalar<V>&& here? If we allow it,
+ // then `auto &&res = lane(0, vec*vec);` creates a dangling reference,
+ // and the scalar (and even the vector types) are small enough to be
+ // passed in registers anyway. On the other hand, the only comparable
+ // accessor function in the standard library that I can think of is
+ // std::get(), and that does return an rvalue reference in this
+ // situation. However, that cannot assume anything about the size of
+ // the returned types.
+ static_assert(std::is_same<RRes, Scalar<V> >::value ||
+ std::is_same<RRes, Scalar<V>&&>::value,
+                      "Result of lane() on an rvalue vector V must be "
+ "Scalar<V> or Scalar<V>&&.");
+ // Can't assert non-assignable, fails for any typical class,
+ // e.g. std::complex<>. Would need to return const Scalar<V> or const
+ // Scalar<V>&&, which would inhibit moving from the return value.
+ // static_assert(!std::is_assignable<RRes, Scalar<V> >::value,
+ // "Result of lane() on a rvalue vector must not be "
+ // "assignable from a scalar.");
+ }
+
+ // check non-default constructors
+ template<class V>
+ void checkCopyMoveConstruct()
+ {
+ // elided copy/move constructors
+ { V vec (make123<V>()); DUNE_SIMD_CHECK(is123(vec)); }
+ { V vec = make123<V>() ; DUNE_SIMD_CHECK(is123(vec)); }
+ { V vec {make123<V>()}; DUNE_SIMD_CHECK(is123(vec)); }
+ { V vec = {make123<V>()}; DUNE_SIMD_CHECK(is123(vec)); }
+
+ // copy constructors
+ { V ref(make123<V>()); V vec (ref);
+ DUNE_SIMD_CHECK(is123(vec)); DUNE_SIMD_CHECK(is123(ref)); }
+ { V ref(make123<V>()); V vec = ref ;
+ DUNE_SIMD_CHECK(is123(vec)); DUNE_SIMD_CHECK(is123(ref)); }
+ { V ref(make123<V>()); V vec {ref};
+ DUNE_SIMD_CHECK(is123(vec)); DUNE_SIMD_CHECK(is123(ref)); }
+ { V ref(make123<V>()); V vec = {ref};
+ DUNE_SIMD_CHECK(is123(vec)); DUNE_SIMD_CHECK(is123(ref)); }
+ { const V ref(make123<V>()); V vec (ref);
+ DUNE_SIMD_CHECK(is123(vec)); }
+ { const V ref(make123<V>()); V vec = ref ;
+ DUNE_SIMD_CHECK(is123(vec)); }
+ { const V ref(make123<V>()); V vec {ref};
+ DUNE_SIMD_CHECK(is123(vec)); }
+ { const V ref(make123<V>()); V vec = {ref};
+ DUNE_SIMD_CHECK(is123(vec)); }
+
+ // move constructors
+ { V ref(make123<V>()); V vec (std::move(ref));
+ DUNE_SIMD_CHECK(is123(vec)); }
+ { V ref(make123<V>()); V vec = std::move(ref) ;
+ DUNE_SIMD_CHECK(is123(vec)); }
+ { V ref(make123<V>()); V vec {std::move(ref)};
+ DUNE_SIMD_CHECK(is123(vec)); }
+ { V ref(make123<V>()); V vec = {std::move(ref)};
+ DUNE_SIMD_CHECK(is123(vec)); }
+ }
+
+ template<class V>
+ void checkBroadcastVectorConstruct()
+ {
+ // broadcast copy constructors
+ { Scalar<V> ref = 42; V vec (ref);
+ DUNE_SIMD_CHECK(is42(vec)); DUNE_SIMD_CHECK(ref == Scalar<V>(42)); }
+ { Scalar<V> ref = 42; V vec = ref ;
+ DUNE_SIMD_CHECK(is42(vec)); DUNE_SIMD_CHECK(ref == Scalar<V>(42)); }
+ // { Scalar<V> ref = 42; V vec {ref};
+ // DUNE_SIMD_CHECK(is42(vec)); DUNE_SIMD_CHECK(ref == Scalar<V>(42)); }
+ // { Scalar<V> ref = 42; V vec = {ref};
+ // DUNE_SIMD_CHECK(is42(vec)); DUNE_SIMD_CHECK(ref == Scalar<V>(42)); }
+ { const Scalar<V> ref = 42; V vec (ref);
+ DUNE_SIMD_CHECK(is42(vec)); }
+ { const Scalar<V> ref = 42; V vec = ref ;
+ DUNE_SIMD_CHECK(is42(vec)); }
+ // { const Scalar<V> ref = 42; V vec {ref};
+ // DUNE_SIMD_CHECK(is42(vec)); }
+ // { const Scalar<V> ref = 42; V vec = {ref};
+ // DUNE_SIMD_CHECK(is42(vec)); }
+
+ // broadcast move constructors
+ { Scalar<V> ref = 42; V vec (std::move(ref));
+ DUNE_SIMD_CHECK(is42(vec)); }
+ { Scalar<V> ref = 42; V vec = std::move(ref) ;
+ DUNE_SIMD_CHECK(is42(vec)); }
+ // { Scalar<V> ref = 42; V vec {std::move(ref)};
+ // DUNE_SIMD_CHECK(is42(vec)); }
+ // { Scalar<V> ref = 42; V vec = {std::move(ref)};
+ // DUNE_SIMD_CHECK(is42(vec)); }
+ }
+
+ template<class V>
+ void checkBroadcastMaskConstruct()
+ {
+ // broadcast copy constructors
+ { Scalar<V> ref = 42; V vec (ref);
+ DUNE_SIMD_CHECK(is42(vec)); DUNE_SIMD_CHECK(ref == Scalar<V>(42)); }
+ // { Scalar<V> ref = 42; V vec = ref ;
+ // DUNE_SIMD_CHECK(is42(vec)); DUNE_SIMD_CHECK(ref == Scalar<V>(42)); }
+ { Scalar<V> ref = 42; V vec {ref};
+ DUNE_SIMD_CHECK(is42(vec)); DUNE_SIMD_CHECK(ref == Scalar<V>(42)); }
+ // { Scalar<V> ref = 42; V vec = {ref};
+ // DUNE_SIMD_CHECK(is42(vec)); DUNE_SIMD_CHECK(ref == Scalar<V>(42)); }
+ { const Scalar<V> ref = 42; V vec (ref);
+ DUNE_SIMD_CHECK(is42(vec)); }
+ // { const Scalar<V> ref = 42; V vec = ref ;
+ // DUNE_SIMD_CHECK(is42(vec)); }
+ { const Scalar<V> ref = 42; V vec {ref};
+ DUNE_SIMD_CHECK(is42(vec)); }
+ // { const Scalar<V> ref = 42; V vec = {ref};
+ // DUNE_SIMD_CHECK(is42(vec)); }
+
+ // broadcast move constructors
+ { Scalar<V> ref = 42; V vec (std::move(ref));
+ DUNE_SIMD_CHECK(is42(vec)); }
+ // { Scalar<V> ref = 42; V vec = std::move(ref) ;
+ // DUNE_SIMD_CHECK(is42(vec)); }
+ { Scalar<V> ref = 42; V vec {std::move(ref)};
+ DUNE_SIMD_CHECK(is42(vec)); }
+ // { Scalar<V> ref = 42; V vec = {std::move(ref)};
+ // DUNE_SIMD_CHECK(is42(vec)); }
+ }
+
+ // check the implCast function
+ template<class FromV, class ToV>
+ void checkImplCast()
+ {
+ { // lvalue arg
+ FromV fromVec = make123<FromV>();
+ auto toVec = implCast<ToV>(fromVec);
+ static_assert(std::is_same<decltype(toVec), ToV>::value,
+ "Unexpected result type for implCast<ToV>(FromV&)");
+ DUNE_SIMD_CHECK(is123(fromVec));
+ DUNE_SIMD_CHECK(is123(toVec));
+ }
+
+ { // const lvalue arg
+ const FromV fromVec = make123<FromV>();
+ auto toVec = implCast<ToV>(fromVec);
+ static_assert(std::is_same<decltype(toVec), ToV>::value,
+ "Unexpected result type for implCast<ToV>(const "
+ "FromV&)");
+ DUNE_SIMD_CHECK(is123(toVec));
+ }
+
+ { // rvalue arg
+ auto toVec = implCast<ToV>(make123<FromV>());
+ static_assert(std::is_same<decltype(toVec), ToV>::value,
+ "Unexpected result type for implCast<ToV>(FromV&&)");
+ DUNE_SIMD_CHECK(is123(toVec));
+ }
+ }
+
+ // check the implCast function
+ template<class V>
+ void checkImplCast()
+ {
+ // check against LoopSIMD
+ using LoopV = Dune::LoopSIMD<Scalar<V>, lanes<V>()>;
+
+ checkImplCast<V, V>();
+ checkImplCast<V, LoopV>();
+ checkImplCast<LoopV, V>();
+ }
+
+ // check the broadcast function
+ template<class V>
+ void checkBroadcast()
+ {
+ // broadcast function
+ { // lvalue arg
+ Scalar<V> ref = 42;
+ auto vec = broadcast<V>(ref);
+ static_assert(std::is_same<decltype(vec), V>::value,
+ "Unexpected result type for broadcast<V>()");
+ DUNE_SIMD_CHECK(is42(vec));
+ DUNE_SIMD_CHECK(ref == Scalar<V>(42));
+ }
+
+ { // const lvalue arg
+ const Scalar<V> ref = 42;
+ auto vec = broadcast<V>(ref);
+ static_assert(std::is_same<decltype(vec), V>::value,
+ "Unexpected result type for broadcast<V>()");
+ DUNE_SIMD_CHECK(is42(vec));
+ }
+
+ { // rvalue arg
+ auto vec = broadcast<V>(Scalar<V>(42));
+ static_assert(std::is_same<decltype(vec), V>::value,
+ "Unexpected result type for broadcast<V>()");
+ DUNE_SIMD_CHECK(is42(vec));
+ }
+
+ { // int arg
+ auto vec = broadcast<V>(42);
+ static_assert(std::is_same<decltype(vec), V>::value,
+ "Unexpected result type for broadcast<V>()");
+ DUNE_SIMD_CHECK(is42(vec));
+ }
+
+ { // double arg
+ auto vec = broadcast<V>(42.0);
+ static_assert(std::is_same<decltype(vec), V>::value,
+ "Unexpected result type for broadcast<V>()");
+ DUNE_SIMD_CHECK(is42(vec));
+ }
+ }
+
+ template<class V>
+ void checkBracedAssign()
+ {
+ // copy assignment
+ { V ref = make123<V>(); V vec; vec = {ref};
+ DUNE_SIMD_CHECK(is123(vec)); DUNE_SIMD_CHECK(is123(ref)); }
+ { const V ref = make123<V>(); V vec; vec = {ref};
+ DUNE_SIMD_CHECK(is123(vec)); DUNE_SIMD_CHECK(is123(ref)); }
+
+ // move assignment
+ { V vec; vec = {make123<V>()}; DUNE_SIMD_CHECK(is123(vec)); }
+ }
+
+ template<class V>
+ void checkBracedBroadcastAssign()
+ {
+ // nothing works here
+ // // broadcast copy assignment
+ // { Scalar<V> ref = 42; V vec; vec = {ref};
+ // DUNE_SIMD_CHECK(is42(vec)); DUNE_SIMD_CHECK(ref == Scalar<V>(42)); }
+ // { const Scalar<V> ref = 42; V vec; vec = {ref};
+ // DUNE_SIMD_CHECK(is42(vec)); }
+
+ // // broadcast move assignment
+ // { Scalar<V> ref = 42; V vec; vec = {std::move(ref)};
+ // DUNE_SIMD_CHECK(is42(vec)); }
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // checks for unary operators
+ //
+
+#define DUNE_SIMD_POSTFIX_OP(NAME, SYMBOL) \
+ struct OpPostfix##NAME \
+ { \
+ template<class V> \
+ auto operator()(V&& v) const \
+ -> decltype(std::forward<V>(v) SYMBOL) \
+ { \
+ return std::forward<V>(v) SYMBOL; \
+ } \
+ }
+
+#define DUNE_SIMD_PREFIX_OP(NAME, SYMBOL) \
+ struct OpPrefix##NAME \
+ { \
+ template<class V> \
+ auto operator()(V&& v) const \
+ -> decltype(SYMBOL std::forward<V>(v)) \
+ { \
+ return SYMBOL std::forward<V>(v); \
+ } \
+ }
+
+ DUNE_SIMD_POSTFIX_OP(Decrement, -- );
+ DUNE_SIMD_POSTFIX_OP(Increment, ++ );
+
+ DUNE_SIMD_PREFIX_OP (Decrement, -- );
+ DUNE_SIMD_PREFIX_OP (Increment, ++ );
+
+ DUNE_SIMD_PREFIX_OP (Plus, + );
+ DUNE_SIMD_PREFIX_OP (Minus, - );
+ DUNE_SIMD_PREFIX_OP (LogicNot, ! );
+ // Do not warn about ~ being applied to bool. (1) Yes, doing that is
+ // weird, but we do want to test the weird stuff too. (2) It avoids
+ // running into <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82040> on
+ // g++-7.0 through 7.2. Also, ignore -Wpragmas to not warn about an
+ // unknown -Wbool-operation on compilers that do not know that option.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpragmas"
+#pragma GCC diagnostic ignored "-Wunknown-warning-option" // clang 6.0.1
+#pragma GCC diagnostic ignored "-Wbool-operation"
+ DUNE_SIMD_PREFIX_OP (BitNot, ~ );
+#pragma GCC diagnostic pop
+
+#undef DUNE_SIMD_POSTFIX_OP
+#undef DUNE_SIMD_PREFIX_OP
+
+ template<class V, class Op>
+ std::enable_if_t<
+ CanCall<Op(decltype(lane(0, std::declval<V>())))>::value>
+ checkUnaryOpV(Op op)
+ {
+#define DUNE_SIMD_OPNAME (className<Op(V)>())
+ // arguments
+ auto val = leftVector<std::decay_t<V>>();
+
+        // copy the arguments in case V is a reference
+ auto arg = val;
+ auto &&result = op(static_cast<V>(arg));
+ using T = Scalar<std::decay_t<decltype(result)> >;
+ for(std::size_t l = 0; l < lanes(val); ++l)
+ {
+ // `op` might promote the argument. This is a problem if the
+ // argument of the operation on the right of the `==` is
+ // e.g. `(unsigned short)1` and the operation is e.g. unary `-`.
+ // Then the argument is promoted to `int` before applying the
+ // negation, and the result is `(int)-1`. However, the left side of
+ // the `==` is still `(unsigned short)-1`, which typically is the
+ // same as `(unsigned short)65535`. The `==` promotes the left side
+ // before comparing, so that becomes `(int)65535`. It will then
+ // compare `(int)65535` and `(int)-1` and rightly declare them to be
+ // not equal.
+
+ // To work around this, we explicitly convert the right side of the
+ // `==` to the scalar type before comparing.
+ DUNE_SIMD_CHECK_OP
+ (lane(l, result)
+ == static_cast<T>(op(lane(l, static_cast<V>(val)))));
+ }
+ // op might modify val, verify that any such modification also happens
+ // in the vector case
+ for(std::size_t l = 0; l < lanes<std::decay_t<V> >(); ++l)
+ DUNE_SIMD_CHECK_OP(lane(l, val) == lane(l, arg));
+#undef DUNE_SIMD_OPNAME
+ }
+
+ template<class V, class Op>
+ std::enable_if_t<
+ !CanCall<Op(decltype(lane(0, std::declval<V>())))>::value>
+ checkUnaryOpV(Op op)
+ {
+ // log_ << "No " << className<Op(decltype(lane(0, std::declval<V>())))>()
+ // << std::endl
+ // << " ==> Not checking " << className<Op(V)>() << std::endl;
+ }
+
+ template<class V, class Op>
+ void checkUnaryOpsV(Op op)
+ {
+ checkUnaryOpV<V&>(op);
+ checkUnaryOpV<const V&>(op);
+ checkUnaryOpV<V&&>(op);
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // checks for binary operators
+ //
+
+ // The operators contain an `operator()`, which will be invoked for both
+      // scalar and vector arguments. The function `scalar()` is used to
+      // test whether the scalar types support the operation (via
+      // `ScalarResult`). The difference is that `scalar()` should only ever
+      // receive a `const`-ref-qualified version of `Scalar<V>`, while the
+ // `operator()` may also be called with proxies representing scalars.
+#define DUNE_SIMD_INFIX_OP(NAME, SYMBOL) \
+ struct OpInfix##NAME \
+ { \
+ template<class V1, class V2> \
+ decltype(auto) operator()(V1&& v1, V2&& v2) const \
+ { \
+ return std::forward<V1>(v1) SYMBOL std::forward<V2>(v2); \
+ } \
+ template<class S1, class S2> \
+ auto scalar(S1&& s1, S2&& s2) const \
+ -> decltype(std::forward<S1>(s1) SYMBOL std::forward<S2>(s2)); \
+ }
+
+ // for assign ops, accept only non-const lvalue arguments for scalars.
+ // This is needed for class scalars (e.g. std::complex) because
+      // non-const class rvalues are usually assignable themselves. That
+      // assignment, however, happens to a temporary and is thus lost.
+      // Worse, the tests would bind the result of the assignment to a
+      // reference; since that result is returned from a function by
+      // reference even though it is a temporary passed as an argument to
+      // that function, accessing the result later is undefined behaviour.
+#define DUNE_SIMD_ASSIGN_OP(NAME, SYMBOL) \
+ struct OpInfix##NAME \
+ { \
+ template<class V1, class V2> \
+ decltype(auto) operator()(V1&& v1, V2&& v2) const \
+ { \
+ return std::forward<V1>(v1) SYMBOL std::forward<V2>(v2); \
+ } \
+ template<class S1, class S2> \
+ auto scalar(S1& s1, S2&& s2) const \
+ -> decltype(s1 SYMBOL std::forward<S2>(s2)); \
+ }
+
+#define DUNE_SIMD_REPL_OP(NAME, REPLFN, SYMBOL) \
+ struct OpInfix##NAME \
+ { \
+ template<class V1, class V2> \
+ decltype(auto) operator()(V1&& v1, V2&& v2) const \
+ { \
+ return Simd::REPLFN(std::forward<V1>(v1), std::forward<V2>(v2)); \
+ } \
+ template<class S1, class S2> \
+ auto scalar(S1&& s1, S2&& s2) const \
+ -> decltype(std::forward<S1>(s1) SYMBOL std::forward<S2>(s2)); \
+ }
+
+ DUNE_SIMD_INFIX_OP(Mul, * );
+ DUNE_SIMD_INFIX_OP(Div, / );
+ DUNE_SIMD_INFIX_OP(Remainder, % );
+
+ DUNE_SIMD_INFIX_OP(Plus, + );
+ DUNE_SIMD_INFIX_OP(Minus, - );
+
+ DUNE_SIMD_INFIX_OP(LeftShift, << );
+ DUNE_SIMD_INFIX_OP(RightShift, >> );
+
+ DUNE_SIMD_INFIX_OP(Less, < );
+ DUNE_SIMD_INFIX_OP(Greater, > );
+ DUNE_SIMD_INFIX_OP(LessEqual, <= );
+ DUNE_SIMD_INFIX_OP(GreaterEqual, >= );
+
+ DUNE_SIMD_INFIX_OP(Equal, == );
+ DUNE_SIMD_INFIX_OP(NotEqual, != );
+
+ DUNE_SIMD_INFIX_OP(BitAnd, & );
+ DUNE_SIMD_INFIX_OP(BitXor, ^ );
+ DUNE_SIMD_INFIX_OP(BitOr, | );
+
+      // Those are not supported in any meaningful way by vectorclass.
+ // We need to test replacement functions maskAnd() and maskOr() instead.
+ DUNE_SIMD_REPL_OP(LogicAnd, maskAnd, && );
+ DUNE_SIMD_REPL_OP(LogicOr, maskOr, || );
+
+ DUNE_SIMD_ASSIGN_OP(Assign, = );
+ DUNE_SIMD_ASSIGN_OP(AssignMul, *= );
+ DUNE_SIMD_ASSIGN_OP(AssignDiv, /= );
+ DUNE_SIMD_ASSIGN_OP(AssignRemainder, %= );
+ DUNE_SIMD_ASSIGN_OP(AssignPlus, += );
+ DUNE_SIMD_ASSIGN_OP(AssignMinus, -= );
+ DUNE_SIMD_ASSIGN_OP(AssignLeftShift, <<=);
+ DUNE_SIMD_ASSIGN_OP(AssignRightShift, >>=);
+ DUNE_SIMD_ASSIGN_OP(AssignAnd, &= );
+ DUNE_SIMD_ASSIGN_OP(AssignXor, ^= );
+ DUNE_SIMD_ASSIGN_OP(AssignOr, |= );
+
+#undef DUNE_SIMD_INFIX_OP
+#undef DUNE_SIMD_REPL_OP
+#undef DUNE_SIMD_ASSIGN_OP
+
+ // just used as a tag
+ struct OpInfixComma {};
+
+ template<class T1, class T2>
+ void checkCommaOp(const std::decay_t<T1> &val1,
+ const std::decay_t<T2> &val2)
+ {
+#define DUNE_SIMD_OPNAME (className<OpInfixComma(T1, T2)>())
+ static_assert(std::is_same<decltype((std::declval<T1>(),
+ std::declval<T2>())), T2>::value,
+ "Type and value category of the comma operator must "
+ "match that of the second operand");
+
+ // copy the arguments in case T1 or T2 are references
+ auto arg1 = val1;
+ auto arg2 = val2;
+ // Do not warn that the left side of the comma operator is unused.
+ // Seems to work for g++-4.9 and clang++-3.8. Appears to be harmless
+ // for icpc (14 and 17), and icpc does not seem to issue a warning
+ // anyway.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-value"
+ auto &&result = (static_cast<T1>(arg1),
+ static_cast<T2>(arg2));
+#pragma GCC diagnostic pop
+ if(std::is_reference<T2>::value)
+ {
+ // comma should return the same object as the second argument for
+ // lvalues and xvalues
+ DUNE_SIMD_CHECK_OP(&result == &arg2);
+ // it should not modify any arguments
+ DUNE_SIMD_CHECK_OP(allTrue(val1 == arg1));
+ DUNE_SIMD_CHECK_OP(allTrue(val2 == arg2));
+ }
+ else
+ {
+ // comma should return the same value as the second argument for
+ // prvalues
+ DUNE_SIMD_CHECK_OP(allTrue(result == arg2));
+ // it should not modify any arguments
+ DUNE_SIMD_CHECK_OP(allTrue(val1 == arg1));
+ // second argument is a prvalue, any modifications happen to a
+ // temporary and we can't detect them
+ }
+#undef DUNE_SIMD_OPNAME
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // checks for vector-vector binary operations
+ //
+
+ // We check the following candidate operation
+ //
+ // vopres = vop1 @ vop2
+ //
+ // against the reference operation
+ //
+ // arefres[l] = aref1[l] @ aref2[l] foreach l
+ //
+ // v... variables are simd-vectors and a... variables are arrays. The
+      // operation may modify the operands, but if it does, the modification
+ // needs to happen in both the candidate and the reference.
+ //
+ // We do the following checks:
+ // 1. lanes(vopres) == lanes(vop1)
+ // 2. lane(l, vopres) == arefres[l] foreach l
+ // 3. lane(l, vop1) == aref1[l] foreach l
+ // 4. lane(l, vop2) == aref2[l] foreach l
+ template<class V1, class V2, class Op>
+ std::enable_if_t<Std::is_detected_v<ScalarResult, Op, V1, V2> >
+ checkBinaryOpVV(MetaType<V1>, MetaType<V2>, Op op)
+ {
+#define DUNE_SIMD_OPNAME (className<Op(V1, V2)>())
+ static_assert(std::is_same<std::decay_t<V1>, std::decay_t<V2> >::value,
+ "Internal testsystem error: called with two types that "
+ "don't decay to the same thing");
+
+ // reference arguments
+ auto vref1 = leftVector<std::decay_t<V1>>();
+ auto vref2 = rightVector<std::decay_t<V2>>();
+
+ // candidate arguments
+ auto vop1 = vref1;
+ auto vop2 = vref2;
+
+ // candidate operation
+ auto &&vopres = op(static_cast<V1>(vop1), static_cast<V2>(vop2));
+ using VR = decltype(vopres);
+
+ // check 1. lanes(vopres) == lanes(vop1)
+ static_assert(lanes<std::decay_t<VR> >() == lanes<std::decay_t<V1> >(),
+ "The result must have the same number of lanes as the "
+ "operands.");
+
+ // do the reference operation, and simultaneously
+ // check 2. lane(l, vopres) == arefres[l] foreach l
+ using T = Scalar<std::decay_t<VR> >;
+ for(auto l : range(lanes(vopres)))
+ {
+ // see the lengthy comment in `checkUnaryOpV()` as to why the
+ // `static_cast` around the `op()` is necessary
+ DUNE_SIMD_CHECK_OP
+ (lane(l, vopres)
+ == static_cast<T>(op(lane(l, static_cast<V1>(vref1)),
+ lane(l, static_cast<V2>(vref2)))));
+ }
+
+ // check 3. lane(l, vop1) == aref1[l] foreach l
+ for(auto l : range(lanes(vop1)))
+ DUNE_SIMD_CHECK_OP(lane(l, vop1) == lane(l, vref1));
+
+ // check 4. lane(l, vop2) == aref2[l] foreach l
+ for(auto l : range(lanes(vop2)))
+ DUNE_SIMD_CHECK_OP(lane(l, vop2) == lane(l, vref2));
+
+#undef DUNE_SIMD_OPNAME
+ }
+
+ template<class V1, class V2, class Op>
+ std::enable_if_t<!Std::is_detected_v<ScalarResult, Op, V1, V2> >
+ checkBinaryOpVV(MetaType<V1>, MetaType<V2>, Op op)
+ {
+ // log_ << "No " << className<Op(decltype(lane(0, std::declval<V1>())),
+ // decltype(lane(0, std::declval<V2>())))>()
+ // << std::endl
+ // << " ==> Not checking " << className<Op(V1, V2)>() << std::endl;
+ }
+
+ template<class V1, class V2>
+ void checkBinaryOpVV(MetaType<V1>, MetaType<V2>, OpInfixComma)
+ {
+ static_assert(std::is_same<std::decay_t<V1>, std::decay_t<V2> >::value,
+ "Internal testsystem error: called with two types that "
+ "don't decay to the same thing");
+
+ checkCommaOp<V1, V2>(leftVector<std::decay_t<V1>>(),
+ rightVector<std::decay_t<V2>>());
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // checks for vector-scalar binary operations
+ //
+
+ // We check the following candidate operation
+ //
+ // vopres = vop1 @ sop2
+ //
+ // against the reference operation
+ //
+ // arefres[l] = aref1[l] @ sref2 foreach l
+ //
+ // v... variables are simd-vectors, a... variables are arrays, and
+ // s... variables are scalars. The operation may modify the left
+      // operand, but if it does, the modification needs to happen in both the
+ // candidate and the reference.
+ //
+ // We do the following checks:
+ // 1. lanes(vopres) == lanes(vop1)
+ // 2. lane(l, vopres) == arefres[l] foreach l
+ // 3. lane(l, vop1) == aref1[l] foreach l
+ // 4. sop2 is never modified
+ // 5. sref2 is never modified
+ //
+      // In fact, if the property "sref2 is never modified" is violated, that
+ // means the operation is unsuitable for an automatic broadcast of the
+ // second operand and should not be checked. There are no operations in
+ // the standard where the second operand is modified like this, but
+ // there are operations where the first operand is modified -- and this
+ // check is used for those ops as well by exchanging the first and second
+ // argument below.
+
+ template<class V1, class T2, class Op>
+ std::enable_if_t<Std::is_detected_v<ScalarResult, Op, V1, T2> >
+ checkBinaryOpVS(MetaType<V1>, MetaType<T2>, Op op)
+ {
+#define DUNE_SIMD_OPNAME (className<Op(V1, T2)>())
+ static_assert(std::is_same<Scalar<std::decay_t<V1> >,
+ std::decay_t<T2> >::value,
+ "Internal testsystem error: called with a scalar that "
+ "does not match the vector type.");
+
+ // initial values
+ auto sinit2 = rightScalar<std::decay_t<T2>>();
+
+ // reference arguments
+ auto vref1 = leftVector<std::decay_t<V1>>();
+ auto sref2 = sinit2;
+
+ // candidate arguments
+ auto vop1 = vref1;
+ auto sop2 = sref2;
+
+ // candidate operation
+ auto &&vopres = op(static_cast<V1>(vop1), static_cast<T2>(sop2));
+ using VR = decltype(vopres);
+
+ // check 1. lanes(vopres) == lanes(vop1)
+ static_assert(lanes<std::decay_t<VR> >() == lanes<std::decay_t<V1> >(),
+ "The result must have the same number of lanes as the "
+ "operands.");
+
+ // check 4. sop2 is never modified
+ DUNE_SIMD_CHECK_OP(sop2 == sinit2);
+
+ // do the reference operation, and simultaneously check 2. and 5.
+ using T = Scalar<std::decay_t<decltype(vopres)> >;
+ for(auto l : range(lanes(vopres)))
+ {
+ // check 2. lane(l, vopres) == arefres[l] foreach l
+ // see the lengthy comment in `checkUnaryOpV()` as to why the
+ // `static_cast` around the `op()` is necessary
+ DUNE_SIMD_CHECK_OP
+ (lane(l, vopres)
+ == static_cast<T>(op(lane(l, static_cast<V1>(vref1)),
+ static_cast<T2>(sref2) )));
+ // check 5. sref2 is never modified
+ DUNE_SIMD_CHECK_OP(sref2 == sinit2);
+ }
+
+ // check 3. lane(l, vop1) == aref1[l] foreach l
+ for(auto l : range(lanes(vop1)))
+ DUNE_SIMD_CHECK_OP(lane(l, vop1) == lane(l, vref1));
+
+#undef DUNE_SIMD_OPNAME
+ }
+
+ template<class V1, class T2, class Op>
+ std::enable_if_t<!Std::is_detected_v<ScalarResult, Op, V1, T2> >
+ checkBinaryOpVS(MetaType<V1>, MetaType<T2>, Op op)
+ {
+ // log_ << "No "
+ // << className<Op(decltype(lane(0, std::declval<V1>())), T2)>()
+ // << std::endl
+ // << " ==> Not checking " << className<Op(V1, T2)>() << std::endl;
+ }
+
+ template<class V1, class T2>
+ void checkBinaryOpVS(MetaType<V1>, MetaType<T2>, OpInfixComma)
+ {
+ static_assert(std::is_same<Scalar<std::decay_t<V1> >,
+ std::decay_t<T2> >::value,
+ "Internal testsystem error: called with a scalar that "
+ "does not match the vector type.");
+
+ checkCommaOp<V1, T2>(leftVector<std::decay_t<V1>>(),
+ rightScalar<std::decay_t<T2>>());
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // cross-check scalar-vector binary operations against vector-vector
+ //
+
+ // We check the following candidate operation
+ //
+ // vopres = vop1 @ vop2, where vop2 = broadcast(sref2)
+ //
+ // against the reference operation
+ //
+ // vrefres = vref1 @ sref2
+ //
+ // v... variables are simd-vectors, a... variables are arrays, and
+ // s... variables are scalars.
+ //
+ // We could check the following properties
+ // 1. lanes(vopres) == lanes(vop1)
+ // 2. lane(l, vopres) == lane(l, vrefres) foreach l
+ // 3. lane(l, vop1) == lane(l, vref1) foreach l
+ // but these are given by checking the operation against the scalar
+ // operation in the vector@vector and vector@scalar cases above.
+ //
+ // The only thing left to check is:
+ // 4. lane(l, vop2) foreach l is never modified
+
+ template<class V1, class T2, class Op>
+ std::enable_if_t<Std::is_detected_v<ScalarResult, Op, V1, T2> >
+ checkBinaryOpVVAgainstVS(MetaType<V1>, MetaType<T2>, Op op)
+ {
+#define DUNE_SIMD_OPNAME (className<Op(V1, T2)>())
+ static_assert(std::is_same<Scalar<std::decay_t<V1> >,
+ std::decay_t<T2> >::value,
+ "Internal testsystem error: called with a scalar that "
+ "does not match the vector type.");
+
+ // initial values
+ auto sinit2 = rightScalar<std::decay_t<T2>>();
+
+ // reference arguments
+ auto vop1 = leftVector<std::decay_t<V1>>();
+ using V2 = CopyRefQual<V1, T2>;
+ std::decay_t<V2> vop2(sinit2);
+
+ // candidate operation
+ op(static_cast<V1>(vop1), static_cast<V2>(vop2));
+
+ // 4. lane(l, vop2) foreach l is never modified
+ for(auto l : range(lanes(vop2)))
+ DUNE_SIMD_CHECK_OP(lane(l, vop2) == sinit2);
+
+#undef DUNE_SIMD_OPNAME
+ }
+
+ template<class V1, class T2, class Op>
+ std::enable_if_t<!Std::is_detected_v<ScalarResult, Op, V1, T2> >
+ checkBinaryOpVVAgainstVS(MetaType<V1>, MetaType<T2>, Op op)
+ {
+ // log_ << "No "
+ // << className<Op(decltype(lane(0, std::declval<V1>())), T2)>()
+ // << std::endl
+ // << " ==> Not checking " << className<Op(V1, T2)>() << std::endl;
+ }
+
+ template<class V1, class T2>
+ void checkBinaryOpVVAgainstVS(MetaType<V1>, MetaType<T2>, OpInfixComma)
+ { }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // checks for vector-proxy binary operations
+ //
+
+ // We check the following candidate operation
+ //
+ // vopres = vop1 @ pop2
+ //
+ // against the reference operation
+ //
+ // arefres[l] = aref1[l] @ sref2 foreach l
+ //
+ // v... variables are simd-vectors, a... variables are arrays,
+ // p... variables are proxies of simd-vector entries and s... variables
+      // are scalars. The operation may modify the left operand, but if it
+      // does, the modification needs to happen in both the candidate and the
+ // reference.
+ //
+ // We do the following checks:
+ // 1. lanes(vopres) == lanes(vop1)
+ // 2. lane(l, vopres) == arefres[l] foreach l
+ // 3. lane(l, vop1) == aref1[l] foreach l
+ // 4. pop2 is never modified
+ // 5. sref2 is never modified
+ //
+      // In fact, if the property "sref2 is never modified" is violated, that
+ // means the operation is unsuitable for an automatic broadcast of the
+ // second operand and should not be checked. There are no operations in
+ // the standard where the second operand is modified like this, but
+ // there are operations where the first operand is modified -- and this
+ // check is used for those ops as well by exchanging the first and second
+ // argument below.
+
+ template<class V1, class V2, class Op>
+ std::enable_if_t<Std::is_detected_v<ScalarResult, Op, V1, V2> >
+ checkBinaryOpVP(MetaType<V1>, MetaType<V2>, Op op)
+ {
+ using P2 = decltype(lane(0, std::declval<V2>()));
+ using T2 = CopyRefQual<Scalar<V2>, V2>;
+#define DUNE_SIMD_OPNAME (className<Op(V1, P2)>())
+ static_assert(std::is_same<Scalar<V1>, Scalar<V2> >::value,
+ "Internal testsystem error: called with two vector "
+ "types whose scalar types don't match.");
+
+ // initial values
+ auto sinit2 = rightScalar<Scalar<V2>>();
+
+ // reference arguments
+ auto vref1 = leftVector<std::decay_t<V1>>();
+ auto sref2 = sinit2;
+
+ // candidate arguments
+ auto vop1 = vref1;
+ auto vop2 = std::decay_t<V2>(Scalar<V2>(0));
+ lane(0, vop2) = sref2; // pop2 is just a name for `lane(0, vop2)`
+
+ // candidate operation
+ auto &&vopres =
+ op(static_cast<V1>(vop1), lane(0, static_cast<V2>(vop2)));
+ using VR = decltype(vopres);
+
+ // check 1. lanes(vopres) == lanes(vop1)
+ static_assert(lanes<std::decay_t<VR> >() == lanes<std::decay_t<V1> >(),
+ "The result must have the same number of lanes as the "
+ "operands.");
+
+ // check 4. pop2 is never modified
+ DUNE_SIMD_CHECK_OP(lane(0, vop2) == sinit2);
+
+ // do the reference operation, and simultaneously check 2. and 5.
+ using T = Scalar<decltype(vopres)>;
+ for(auto l : range(lanes(vopres)))
+ {
+ // check 2. lane(l, vopres) == arefres[l] foreach l
+ // see the lengthy comment in `checkUnaryOpV()` as to why the
+ // `static_cast` around the `op()` is necessary
+ DUNE_SIMD_CHECK_OP
+ (lane(l, vopres)
+ == static_cast<T>(op(lane(l, static_cast<V1>(vref1)),
+ static_cast<T2>(sref2) )));
+ // check 5. sref2 is never modified
+ DUNE_SIMD_CHECK_OP(sref2 == sinit2);
+ }
+
+ // check 3. lane(l, vop1) == aref1[l] foreach l
+ for(auto l : range(lanes(vop1)))
+ DUNE_SIMD_CHECK_OP(lane(l, vop1) == lane(l, vref1));
+
+#undef DUNE_SIMD_OPNAME
+ }
+
+ template<class V1, class V2, class Op>
+ std::enable_if_t<!Std::is_detected_v<ScalarResult, Op, V1, V2> >
+ checkBinaryOpVP(MetaType<V1>, MetaType<V2>, Op op)
+ {
+ // log_ << "No "
+ // << className<Op(decltype(lane(0, std::declval<V1>())), T2)>()
+ // << std::endl
+ // << " ==> Not checking " << className<Op(V1, T2)>() << std::endl;
+ }
+
+ template<class V1, class V2>
+ void checkBinaryOpVP(MetaType<V1>, MetaType<V2>, OpInfixComma)
+ {
+ // Don't really know how to check comma operator for proxies
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // checks for (scalar/proxy)-vector binary operations
+ //
+
+ template<class Op>
+ struct OpInfixSwappedArgs
+ {
+ Op orig;
+
+ template<class V1, class V2>
+ decltype(auto) operator()(V1&& v1, V2&& v2) const
+ {
+ return orig(std::forward<V2>(v2), std::forward<V1>(v1));
+ }
+ template<class S1, class S2>
+ auto scalar(S1&& s1, S2&& s2) const
+ -> decltype(orig.scalar(std::forward<S2>(s2), std::forward<S1>(s1)));
+ };
+
+ template<class T1, class V2, class Op>
+ void checkBinaryOpSV(MetaType<T1> t1, MetaType<V2> v2, Op op)
+ {
+ checkBinaryOpVS(v2, t1, OpInfixSwappedArgs<Op>{op});
+ }
+
+ template<class T1, class V2>
+ void checkBinaryOpSV(MetaType<T1>, MetaType<V2>, OpInfixComma)
+ {
+ static_assert(std::is_same<std::decay_t<T1>,
+ Scalar<std::decay_t<V2> > >::value,
+ "Internal testsystem error: called with a scalar that "
+ "does not match the vector type.");
+
+ checkCommaOp<T1, V2>(leftScalar<std::decay_t<T1>>(),
+ rightVector<std::decay_t<V2>>());
+ }
+
+ template<class V1, class V2, class Op>
+ void checkBinaryOpPV(MetaType<V1> v1, MetaType<V2> v2, Op op)
+ {
+ checkBinaryOpVP(v2, v1, OpInfixSwappedArgs<Op>{op});
+ }
+
+ template<class V1, class V2>
+ void checkBinaryOpPV(MetaType<V1>, MetaType<V2>, OpInfixComma)
+ {
+ // Don't really know how to check comma operator for proxies
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // cross-check scalar-vector binary operations against vector-vector
+ //
+
+ // We check the following candidate operation
+ //
+ // vopres = vop1 @ vop2, where vop2 = broadcast(sref2)
+ //
+ // against the reference operation
+ //
+ // vrefres = vref1 @ sref2
+ //
+ // v... variables are simd-vectors, a... variables are arrays, and
+ // s... variables are scalars.
+ //
+ // We could check the following properties
+ // 1. lanes(vopres) == lanes(vop1)
+ // 2. lane(l, vopres) == lane(l, vrefres) foreach l
+ // 3. lane(l, vop1) == lane(l, vref1) foreach l
+ // but these are given by checking the operation against the scalar
+ // operation in the vector@vector and vector@scalar cases above.
+ //
+ // The only thing left to check is:
+ // 4. lane(l, vop2) foreach l is never modified
+
+ template<class T1, class V2, class Op>
+ void checkBinaryOpVVAgainstSV(MetaType<T1> t1, MetaType<V2> v2, Op op)
+ {
+ checkBinaryOpVVAgainstVS(v2, t1, OpInfixSwappedArgs<Op>{op});
+ }
+
+ template<class V1, class T2>
+ void checkBinaryOpVVAgainstSV(MetaType<V1>, MetaType<T2>, OpInfixComma)
+ { }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Invoke the checks for all combinations
+ //
+
+ template<class T1, class T2, bool condition, class Checker>
+ void checkBinaryRefQual(Checker checker)
+ {
+ if constexpr (condition) {
+ Hybrid::forEach(TypeList<T1&, const T1&, T1&&>{}, [=] (auto t1) {
+ Hybrid::forEach(TypeList<T2&, const T2&, T2&&>{}, [=] (auto t2) {
+ checker(t1, t2);
+ });
+ });
+ }
+ }
+
+ template<class V, class Checker>
+ void checkBinaryOps(Checker checker)
+ {
+ using Std::bool_constant;
+
+ constexpr bool isMask = std::is_same<Scalar<V>, bool>::value;
+
+ constexpr bool do_ = false;
+ constexpr bool do_SV = true;
+ constexpr bool do_VV = true;
+ constexpr bool do_VS = true;
+
+#define DUNE_SIMD_DO(M1, M2, M3, V1, V2, V3, NAME) \
+ checker(bool_constant<isMask ? do_##M1 : do_##V1>{}, \
+ bool_constant<isMask ? do_##M2 : do_##V2>{}, \
+ bool_constant<isMask ? do_##M3 : do_##V3>{}, \
+ Op##NAME{})
+
+ // ( Mask: SV, VV, VS, Vector: SV, VV, VS, Name );
+
+ DUNE_SIMD_DO( , , , SV, VV, VS, InfixMul );
+ DUNE_SIMD_DO( , , , SV, VV, VS, InfixDiv );
+ DUNE_SIMD_DO( , , , SV, VV, VS, InfixRemainder );
+
+ DUNE_SIMD_DO( , , , SV, VV, VS, InfixPlus );
+ DUNE_SIMD_DO( , , , SV, VV, VS, InfixMinus );
+
+ DUNE_SIMD_DO( , , , , VV, VS, InfixLeftShift );
+ DUNE_SIMD_DO( , , , , VV, VS, InfixRightShift );
+
+ DUNE_SIMD_DO( , , , SV, VV, VS, InfixLess );
+ DUNE_SIMD_DO( , , , SV, VV, VS, InfixGreater );
+ DUNE_SIMD_DO( , , , SV, VV, VS, InfixLessEqual );
+ DUNE_SIMD_DO( , , , SV, VV, VS, InfixGreaterEqual );
+
+ DUNE_SIMD_DO( , , , SV, VV, VS, InfixEqual );
+ DUNE_SIMD_DO( , , , SV, VV, VS, InfixNotEqual );
+
+ DUNE_SIMD_DO( , VV, , SV, VV, VS, InfixBitAnd );
+ DUNE_SIMD_DO( , VV, , SV, VV, VS, InfixBitXor );
+ DUNE_SIMD_DO( , VV, , SV, VV, VS, InfixBitOr );
+
+ DUNE_SIMD_DO(SV, VV, VS, SV, VV, VS, InfixLogicAnd );
+ DUNE_SIMD_DO(SV, VV, VS, SV, VV, VS, InfixLogicOr );
+
+ DUNE_SIMD_DO( , VV, , , VV, VS, InfixAssign );
+ DUNE_SIMD_DO( , , , , VV, VS, InfixAssignMul );
+ DUNE_SIMD_DO( , , , , VV, VS, InfixAssignDiv );
+ DUNE_SIMD_DO( , , , , VV, VS, InfixAssignRemainder );
+ DUNE_SIMD_DO( , , , , VV, VS, InfixAssignPlus );
+ DUNE_SIMD_DO( , , , , VV, VS, InfixAssignMinus );
+ DUNE_SIMD_DO( , , , , VV, VS, InfixAssignLeftShift );
+ DUNE_SIMD_DO( , , , , VV, VS, InfixAssignRightShift);
+ DUNE_SIMD_DO( , VV, , , VV, VS, InfixAssignAnd );
+ DUNE_SIMD_DO( , VV, , , VV, VS, InfixAssignXor );
+ DUNE_SIMD_DO( , VV, , , VV, VS, InfixAssignOr );
+
+ DUNE_SIMD_DO(SV, VV, VS, SV, , VS, InfixComma );
+
+#undef DUNE_SIMD_DO
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // SIMD interface functions
+ //
+
+ template<class V>
+ void checkAutoCopy()
+ {
+ using RValueResult = decltype(autoCopy(lane(0, std::declval<V>())));
+ static_assert(std::is_same<RValueResult, Scalar<V> >::value,
+ "Result of autoCopy() must always be Scalar<V>");
+
+ using MutableLValueResult =
+ decltype(autoCopy(lane(0, std::declval<V&>())));
+ static_assert(std::is_same<MutableLValueResult, Scalar<V> >::value,
+ "Result of autoCopy() must always be Scalar<V>");
+
+ using ConstLValueResult =
+ decltype(autoCopy(lane(0, std::declval<const V&>())));
+ static_assert(std::is_same<ConstLValueResult, Scalar<V> >::value,
+ "Result of autoCopy() must always be Scalar<V>");
+
+ V vec = make123<V>();
+ for(std::size_t l = 0; l < lanes(vec); ++l)
+ DUNE_SIMD_CHECK(autoCopy(lane(l, vec)) == Scalar<V>(l+1));
+ }
+
+ // may only be called for mask types
+ template<class M>
+ void checkBoolReductions()
+ {
+ M trueVec(true);
+
+ // mutable lvalue
+ DUNE_SIMD_CHECK(allTrue (static_cast<M&>(trueVec)) == true);
+ DUNE_SIMD_CHECK(anyTrue (static_cast<M&>(trueVec)) == true);
+ DUNE_SIMD_CHECK(allFalse(static_cast<M&>(trueVec)) == false);
+ DUNE_SIMD_CHECK(anyFalse(static_cast<M&>(trueVec)) == false);
+
+ // const lvalue
+ DUNE_SIMD_CHECK(allTrue (static_cast<const M&>(trueVec)) == true);
+ DUNE_SIMD_CHECK(anyTrue (static_cast<const M&>(trueVec)) == true);
+ DUNE_SIMD_CHECK(allFalse(static_cast<const M&>(trueVec)) == false);
+ DUNE_SIMD_CHECK(anyFalse(static_cast<const M&>(trueVec)) == false);
+
+ // rvalue
+ DUNE_SIMD_CHECK(allTrue (M(true)) == true);
+ DUNE_SIMD_CHECK(anyTrue (M(true)) == true);
+ DUNE_SIMD_CHECK(allFalse(M(true)) == false);
+ DUNE_SIMD_CHECK(anyFalse(M(true)) == false);
+
+ M falseVec(false);
+
+ // mutable lvalue
+ DUNE_SIMD_CHECK(allTrue (static_cast<M&>(falseVec)) == false);
+ DUNE_SIMD_CHECK(anyTrue (static_cast<M&>(falseVec)) == false);
+ DUNE_SIMD_CHECK(allFalse(static_cast<M&>(falseVec)) == true);
+ DUNE_SIMD_CHECK(anyFalse(static_cast<M&>(falseVec)) == true);
+
+ // const lvalue
+ DUNE_SIMD_CHECK(allTrue (static_cast<const M&>(falseVec)) == false);
+ DUNE_SIMD_CHECK(anyTrue (static_cast<const M&>(falseVec)) == false);
+ DUNE_SIMD_CHECK(allFalse(static_cast<const M&>(falseVec)) == true);
+ DUNE_SIMD_CHECK(anyFalse(static_cast<const M&>(falseVec)) == true);
+
+ // rvalue
+ DUNE_SIMD_CHECK(allTrue (M(false)) == false);
+ DUNE_SIMD_CHECK(anyTrue (M(false)) == false);
+ DUNE_SIMD_CHECK(allFalse(M(false)) == true);
+ DUNE_SIMD_CHECK(anyFalse(M(false)) == true);
+
+ auto mixedVec = broadcast<M>(0);
+ for(std::size_t l = 0; l < lanes(mixedVec); ++l)
+ lane(l, mixedVec) = (l % 2);
+
+ // mutable lvalue
+ DUNE_SIMD_CHECK
+ (allTrue (static_cast<M&>(mixedVec)) == false);
+ DUNE_SIMD_CHECK
+ (anyTrue (static_cast<M&>(mixedVec)) == (lanes<M>() > 1));
+ DUNE_SIMD_CHECK
+ (allFalse(static_cast<M&>(mixedVec)) == (lanes<M>() == 1));
+ DUNE_SIMD_CHECK
+ (anyFalse(static_cast<M&>(mixedVec)) == true);
+
+ // const lvalue
+ DUNE_SIMD_CHECK
+ (allTrue (static_cast<const M&>(mixedVec)) == false);
+ DUNE_SIMD_CHECK
+ (anyTrue (static_cast<const M&>(mixedVec)) == (lanes<M>() > 1));
+ DUNE_SIMD_CHECK
+ (allFalse(static_cast<const M&>(mixedVec)) == (lanes<M>() == 1));
+ DUNE_SIMD_CHECK
+ (anyFalse(static_cast<const M&>(mixedVec)) == true);
+
+ // rvalue
+ DUNE_SIMD_CHECK(allTrue (M(mixedVec)) == false);
+ DUNE_SIMD_CHECK(anyTrue (M(mixedVec)) == (lanes<M>() > 1));
+ DUNE_SIMD_CHECK(allFalse(M(mixedVec)) == (lanes<M>() == 1));
+ DUNE_SIMD_CHECK(anyFalse(M(mixedVec)) == true);
+ }
+
+ template<class V>
+ void checkCond()
+ {
+ using M = Mask<V>;
+
+ static_assert
+ (std::is_same<decltype(cond(std::declval<M>(), std::declval<V>(),
+ std::declval<V>())), V>::value,
+ "The result of cond(M, V, V) should have exactly the type V");
+
+ static_assert
+ (std::is_same<decltype(cond(std::declval<const M&>(),
+ std::declval<const V&>(),
+ std::declval<const V&>())), V>::value,
+ "The result of cond(const M&, const V&, const V&) should have "
+ "exactly the type V");
+
+ static_assert
+ (std::is_same<decltype(cond(std::declval<M&>(), std::declval<V&>(),
+ std::declval<V&>())), V>::value,
+ "The result of cond(M&, V&, V&) should have exactly the type V");
+
+ V vec1 = leftVector<V>();
+ V vec2 = rightVector<V>();
+
+ DUNE_SIMD_CHECK(allTrue(cond(M(true), vec1, vec2) == vec1));
+ DUNE_SIMD_CHECK(allTrue(cond(M(false), vec1, vec2) == vec2));
+
+ auto mixedResult = broadcast<V>(0);
+ auto mixedMask = broadcast<M>(false);
+ for(std::size_t l = 0; l < lanes(mixedMask); ++l)
+ {
+ lane(l, mixedMask ) = (l % 2);
+ lane(l, mixedResult) = lane(l, (l % 2) ? vec1 : vec2);
+ }
+
+ DUNE_SIMD_CHECK(allTrue(cond(mixedMask, vec1, vec2) == mixedResult));
+ }
+
+ template<class V>
+ void checkBoolCond()
+ {
+ static_assert
+ (std::is_same<decltype(cond(std::declval<bool>(), std::declval<V>(),
+ std::declval<V>())), V>::value,
+ "The result of cond(bool, V, V) should have exactly the type V");
+
+ static_assert
+ (std::is_same<decltype(cond(std::declval<const bool&>(),
+ std::declval<const V&>(),
+ std::declval<const V&>())), V>::value,
+ "The result of cond(const bool&, const V&, const V&) should have "
+ "exactly the type V");
+
+ static_assert
+ (std::is_same<decltype(cond(std::declval<bool&>(),
+ std::declval<V&>(),
+ std::declval<V&>())), V>::value,
+ "The result of cond(bool&, V&, V&) should have exactly the type V");
+
+ V vec1 = leftVector<V>();
+ V vec2 = rightVector<V>();
+
+ DUNE_SIMD_CHECK(allTrue(cond(true, vec1, vec2) == vec1));
+ DUNE_SIMD_CHECK(allTrue(cond(false, vec1, vec2) == vec2));
+ }
+
+ template<class V>
+ std::enable_if_t<!Impl::LessThenComparable<Scalar<V> >::value>
+ checkHorizontalMinMax() {}
+
+ template<class V>
+ std::enable_if_t<Impl::LessThenComparable<Scalar<V> >::value>
+ checkHorizontalMinMax()
+ {
+ static_assert
+ (std::is_same<decltype(max(std::declval<V>())), Scalar<V> >::value,
+ "The result of max(V) should be exactly Scalar<V>");
+
+ static_assert
+ (std::is_same<decltype(min(std::declval<V>())), Scalar<V> >::value,
+ "The result of min(V) should be exactly Scalar<V>");
+
+ static_assert
+ (std::is_same<decltype(max(std::declval<V&>())), Scalar<V> >::value,
+ "The result of max(V) should be exactly Scalar<V>");
+
+ static_assert
+ (std::is_same<decltype(min(std::declval<V&>())), Scalar<V> >::value,
+ "The result of min(V) should be exactly Scalar<V>");
+
+ const V vec1 = leftVector<V>();
+
+ DUNE_SIMD_CHECK(max(vec1) == Scalar<V>(lanes(vec1)));
+ DUNE_SIMD_CHECK(min(vec1) == Scalar<V>(1));
+ }
+
+ template<class V>
+ std::enable_if_t<!Impl::LessThenComparable<Scalar<V> >::value>
+ checkBinaryMinMax() {}
+
+ template<class V>
+ std::enable_if_t<Impl::LessThenComparable<Scalar<V> >::value>
+ checkBinaryMinMax()
+ {
+ using std::max;
+ using std::min;
+
+ static_assert
+ (std::is_same<decltype(Simd::max(std::declval<V>(),
+ std::declval<V>())), V>::value,
+ "The result of Simd::max(V, V) should be exactly V");
+ static_assert
+ (std::is_same<decltype(Simd::min(std::declval<V>(),
+ std::declval<V>())), V>::value,
+ "The result of Simd::min(V, V) should be exactly V");
+
+ static_assert
+ (std::is_same<decltype(Simd::max(std::declval<V&>(),
+ std::declval<V&>())), V>::value,
+ "The result of Simd::max(V&, V&) should be exactly V");
+ static_assert
+ (std::is_same<decltype(Simd::min(std::declval<V&>(),
+ std::declval<V&>())), V>::value,
+ "The result of Simd::min(V&, V&) should be exactly V");
+
+ const V arg1 = leftVector<V>();
+ const V arg2 = rightVector<V>();
+
+ V maxExp(Scalar<V>(0)), minExp(Scalar<V>(0));
+ for(auto l : range(lanes<V>()))
+ {
+ lane(l, maxExp) = max(lane(l, arg1), lane(l, arg2));
+ lane(l, minExp) = min(lane(l, arg1), lane(l, arg2));
+ }
+
+ DUNE_SIMD_CHECK(allTrue(maxExp == Simd::max(arg1, arg2)));
+ DUNE_SIMD_CHECK(allTrue(minExp == Simd::min(arg1, arg2)));
+ }
+
+ template<class V>
+ void checkIO()
+ {
+ const V vec1 = leftVector<V>();
+
+ std::string reference;
+ {
+ const char *sep = "";
+ for(auto l : range(lanes(vec1)))
+ {
+ std::ostringstream stream;
+ stream << lane(l, vec1);
+
+ reference += sep;
+ reference += stream.str();
+ sep = ", ";
+ }
+ }
+
+ {
+ std::ostringstream stream;
+ stream << io(vec1);
+ if(lanes(vec1) == 1)
+ DUNE_SIMD_CHECK(stream.str() == reference);
+ else
+ DUNE_SIMD_CHECK(stream.str() == "<" + reference + ">");
+ }
+
+ {
+ std::ostringstream stream;
+ stream << vio(vec1);
+ DUNE_SIMD_CHECK(stream.str() == "<" + reference + ">");
+ }
+ }
+
+#undef DUNE_SIMD_CHECK
+
+ public:
+ /**
+ * @name Test instantiation points
+ *
+ * These functions should not be called directly, but serve as explicit
+ * instantiation points to keep memory usage bounded during compilation.
+ * There should be an explicit instantiation declaration (`extern
+ * template ...`) in the overall header of your unit test for each
+ * type that is tested (possibly implicitly tested due to recursive
+ * checks). Similarly, there should be an explicit instantiation
+ * definition (`template ...`) in a separate translation unit. Ideally,
+ * there should be one translation unit per explicit instantiation
+ * definition, otherwise each of them will contribute to the overall
+ * memory used during compilation.
+ *
+ * If explicitly instantiating the top-level instantiation point
+ * `checkType()` is not sufficient, there are further instantiation
+ * points for improved granularity. The hierarchy of instantiation
+ * points is:
+ * - `checkType()`
+ * - `checkNonOps()`
+ * - `checkUnaryOps()`
+ * - `checkBinaryOps()`
+ * - `checkBinaryOpsVectorVector()`
+ * - `checkBinaryOpsScalarVector()`
+ * - `checkBinaryOpsVectorScalar()`
+ * - `checkBinaryOpsProxyVector()`
+ * - `checkBinaryOpsVectorProxy()`
+ *
+ * Each instantiation point in the hierarchy implicitly instantiates its
+ * descendants, unless there are explicit instantiation declarations for
+ * them. However, for future-proofing it can make sense to explicitly
+ * instantiate nodes in the hierarchy even if all their children are
+ * already explicitly instantiated. This will limit the impact of
+ * instantiation points added in the future.
+ *
+ * For an example of how to do the instantiations, look at
+ * `standardtest`; there is cmake machinery to support you.
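+ *
+ * As a rough sketch of that pattern (mirroring the generated
+ * `standardtest` sources; the scalar type `double` is just an example),
+ * the unit test header would declare, in namespace `Dune::Simd`,
+ * \code
+ *   extern template void UnitTest::checkType<double>();
+ * \endcode
+ * and a separate translation unit would provide the matching definition
+ * \code
+ *   template void UnitTest::checkType<double>();
+ * \endcode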
+ *
+ * Background: The compiler can use a lot of memory when compiling a
+ * unit test for many Simd vector types. E.g. for standardtest.cc,
+ * which tests all the fundamental arithmetic types plus \c
+ * std::complex, g++ 4.9.2 (-g -O0 -Wall on x86_64 GNU/Linux) used
+ * ~6GByte.
+ *
+ * One mitigation was to explicitly instantiate \c checkVector() (a
+ * previous, now obsolete incarnation of this instantiation machinery)
+ * for the types that are tested. Still, after doing that,
+ * standardtest.cc needed ~1.5GByte during compilation, which is more
+ * than the compilation units that actually instantiated \c
+ * checkVector() (which clocked in at maximum at around 800MB, depending
+ * on how many instantiations they contained).
+ *
+ * The second mitigation was to define \c checkVector() outside of the
+ * class. I have no idea why this helped, but it made compilation use
+ * less than ~100MByte. (Yes, functions defined inside the class are
+ * implicitly \c inline, but the function is a template so it has inline
+ * semantics even when defined outside of the class. And I tried \c
+ * __attribute__((__noinline__)), which had no effect on memory
+ * consumption.)
+ *
+ * @{
+ */
+ template<class V> void checkType();
+ template<class V> void checkNonOps();
+ template<class V> void checkUnaryOps();
+ template<class V> void checkBinaryOps();
+ template<class V> void checkBinaryOpsVectorVector();
+ template<class V> void checkBinaryOpsScalarVector();
+ template<class V> void checkBinaryOpsVectorScalar();
+ template<class V> void checkBinaryOpsProxyVector();
+ template<class V> void checkBinaryOpsVectorProxy();
+ /** @} Group Test instantiation points */
+
+ //! run unit tests for simd vector type V
+ /**
+ * This function will also ensure that `check<W>()` is run, for any type
+ * `W = Rebind<R, V>` where `R` is in `Rebinds`, and
+ * `RebindPrune<W>::value == false`. No test will be run twice for a
+ * given type.
+ *
+ * If the result of `Rebind` is not pruned by `RebindPrune`, it will be
+ * passed to `RebindAccept`. If that rejects the type, a static
+ * assertion will trigger.
+ *
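+ * A minimal invocation, loosely following the generated
+ * `standardtest.cc` (here `Rebinds` and `RebindAccept` stand for a
+ * rebind list and an accept predicate defined by the test), might look
+ * like this:
+ * \code
+ *   Dune::Simd::UnitTest test;
+ *   test.check<double, Rebinds, Dune::AlwaysFalse, RebindAccept>();
+ *   bool success = test.good();
+ * \endcode
+ *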
+ * \tparam Rebinds A list of types, usually in the form of a
+ * `TypeList`.
+ * \tparam RebindPrune A type predicate determining whether to run
+ * `check()` for types obtained from `Rebinds`.
+ * \tparam RebindAccept A type predicate determining whether a type is
+ * acceptable as the result of a `Rebind`.
+ */
+ template<class V, class Rebinds,
+ template<class> class RebindPrune = IsLoop,
+ template<class> class RebindAccept = Dune::AlwaysTrue>
+ void check() {
+ // check whether the test for this type already started
+ if(seen_.emplace(typeid (V)).second == false)
+ {
+ // type already seen, nothing to do
+ return;
+ }
+
+ // do these first so everything that appears after "Checking SIMD type
+ // ..." really pertains to that type
+ auto recurse = [this](auto w) {
+ using W = typename decltype(w)::type;
+ this->template check<W, Rebinds, RebindPrune, RebindAccept>();
+ };
+ checkRebindOf<V, Rebinds, RebindPrune, RebindAccept>(recurse);
+
+ checkType<V>();
+ }
+
+ //! whether all tests succeeded
+ bool good() const
+ {
+ return good_;
+ }
+
+ }; // class UnitTest
+
+ template<class V> void UnitTest::checkType()
+ {
+ static_assert(std::is_same<V, std::decay_t<V> >::value, "Simd types "
+ "must not be references, and must not include "
+ "cv-qualifiers");
+
+ log_ << "Checking SIMD type " << className<V>() << std::endl;
+
+ checkNonOps<V>();
+ checkUnaryOps<V>();
+ checkBinaryOps<V>();
+ }
+ template<class V> void UnitTest::checkNonOps()
+ {
+ constexpr auto isMask = typename std::is_same<Scalar<V>, bool>::type{};
+
+ checkLanes<V>();
+ checkScalar<V>();
+
+ checkDefaultConstruct<V>();
+ checkLane<V>();
+ checkCopyMoveConstruct<V>();
+ checkImplCast<V>();
+ checkBroadcast<V>();
+ if constexpr (isMask)
+ this->template checkBroadcastMaskConstruct<V>();
+ else
+ this->template checkBroadcastVectorConstruct<V>();
+ checkBracedAssign<V>();
+ checkBracedBroadcastAssign<V>();
+
+ checkAutoCopy<V>();
+ checkCond<V>();
+ checkBoolCond<V>();
+
+ if constexpr (isMask)
+ this->template checkBoolReductions<V>();
+ // checkBoolReductions() is not applicable for non-masks
+
+ checkHorizontalMinMax<V>();
+ checkBinaryMinMax<V>();
+ checkIO<V>();
+ }
+ template<class V> void UnitTest::checkUnaryOps()
+ {
+ if constexpr (std::is_same_v<Scalar<V>, bool>) {
+ // check mask
+ auto check = [this](auto op) {
+ this->template checkUnaryOpsV<V>(op);
+ };
+
+ // postfix
+ // check(OpPostfixDecrement{});
+ // clang deprecation warning if bool++ is tested
+ // check(OpPostfixIncrement{});
+
+ // prefix
+ // check(OpPrefixDecrement{});
+ // clang deprecation warning if ++bool is tested
+ // check(OpPrefixIncrement{});
+
+ // check(OpPrefixPlus{});
+ // check(OpPrefixMinus{});
+ check(OpPrefixLogicNot{});
+ // check(OpPrefixBitNot{});
+ }
+ else {
+ // check vector
+ auto check = [this](auto op) {
+ this->template checkUnaryOpsV<V>(op);
+ };
+
+ // postfix
+ // check(OpPostfixDecrement{});
+ // check(OpPostfixIncrement{});
+
+ // prefix
+ // check(OpPrefixDecrement{});
+ // check(OpPrefixIncrement{});
+
+ // check(OpPrefixPlus{});
+ check(OpPrefixMinus{});
+ check(OpPrefixLogicNot{});
+ check(OpPrefixBitNot{});
+ }
+ }
+ template<class V> void UnitTest::checkBinaryOps()
+ {
+ checkBinaryOpsVectorVector<V>();
+ checkBinaryOpsScalarVector<V>();
+ checkBinaryOpsVectorScalar<V>();
+ checkBinaryOpsProxyVector<V>();
+ checkBinaryOpsVectorProxy<V>();
+ }
+ template<class V> void UnitTest::checkBinaryOpsVectorVector()
+ {
+ auto checker = [this](auto doSV, auto doVV, auto doVS, auto op) {
+ auto check = [this,op](auto t1, auto t2) {
+ this->checkBinaryOpVV(t1, t2, op);
+ };
+ this->checkBinaryRefQual<V, V, doVV>(check);
+ };
+ checkBinaryOps<V>(checker);
+ }
+ template<class V> void UnitTest::checkBinaryOpsScalarVector()
+ {
+ auto checker = [this](auto doSV, auto doVV, auto doVS, auto op) {
+ auto check = [this,op](auto t1, auto t2) {
+ this->checkBinaryOpSV(t1, t2, op);
+ };
+ this->checkBinaryRefQual<Scalar<V>, V, doSV>(check);
+
+ auto crossCheck = [this,op](auto t1, auto t2) {
+ this->checkBinaryOpVVAgainstSV(t1, t2, op);
+ };
+ this->checkBinaryRefQual<Scalar<V>, V, doSV && doVV>(crossCheck);
+ };
+ checkBinaryOps<V>(checker);
+ }
+ template<class V> void UnitTest::checkBinaryOpsVectorScalar()
+ {
+ auto checker = [this](auto doSV, auto doVV, auto doVS, auto op) {
+ auto check = [this,op](auto t1, auto t2) {
+ this->checkBinaryOpVS(t1, t2, op);
+ };
+ this->checkBinaryRefQual<V, Scalar<V>, doVS>(check);
+
+ auto crossCheck = [this,op](auto t1, auto t2) {
+ this->checkBinaryOpVVAgainstVS(t1, t2, op);
+ };
+ this->checkBinaryRefQual<V, Scalar<V>, doVV && doVS>(crossCheck);
+ };
+ checkBinaryOps<V>(checker);
+ }
+ template<class V> void UnitTest::checkBinaryOpsProxyVector()
+ {
+ auto checker = [this](auto doSV, auto doVV, auto doVS, auto op) {
+ auto check = [this,op](auto t1, auto t2) {
+ this->checkBinaryOpPV(t1, t2, op);
+ };
+ this->checkBinaryRefQual<V, V, doSV>(check);
+ };
+ checkBinaryOps<V>(checker);
+ }
+ template<class V> void UnitTest::checkBinaryOpsVectorProxy()
+ {
+ auto checker = [this](auto doSV, auto doVV, auto doVS, auto op) {
+ auto check = [this,op](auto t1, auto t2) {
+ this->checkBinaryOpVP(t1, t2, op);
+ };
+ this->checkBinaryRefQual<V, V, doVS>(check);
+ };
+ checkBinaryOps<V>(checker);
+ }
+
+ } // namespace Simd
+} // namespace Dune
+
+#endif // DUNE_COMMON_SIMD_TEST_HH
--- /dev/null
+include(DuneCMakeCompat)
+
+# We need to explicitly instantiate the tests for small groups of types --
+# otherwise the compiler will eat excessive amounts of memory. This way it seems
+# to stay below 1GByte (with g++ 4.9.2 -O0 -g on x86_64 GNU/Linux, looking at
+# standardtest).
+include(DuneInstance)
+
+set(TYPES
+ char "unsigned char" "signed char"
+ short int long "long long"
+ "unsigned short" unsigned "unsigned long" "unsigned long long"
+ bool
+ float double "long double"
+ std::complex<float> std::complex<double> "std::complex<long double>"
+ )
+
+# Generate files with instantiations, external declarations, and also the
+# invocations in the test for each instance.
+dune_instance_begin(FILES looptest.hh looptest.cc)
+foreach(SCALAR IN LISTS TYPES)
+ dune_instance_add(ID "${SCALAR}")
+ foreach(POINT IN ITEMS
+ Type
+ BinaryOpsScalarVector BinaryOpsVectorScalar)
+ dune_instance_add(TEMPLATE POINT ID "${POINT}_${SCALAR}"
+ FILES looptest_vector.cc)
+ endforeach()
+endforeach()
+dune_instance_end()
+
+list(FILTER DUNE_INSTANCE_GENERATED INCLUDE REGEX [[\.cc$]])
+dune_add_test(NAME looptest
+ SOURCES ${DUNE_INSTANCE_GENERATED}
+ LINK_LIBRARIES dunecommon
+)
+# no need to install looptest.hh, used by looptest*.cc only
+
+
+
+set(TYPES
+ char "unsigned char" "signed char"
+ short int long "long long"
+ "unsigned short" unsigned "unsigned long" "unsigned long long"
+ bool
+ float double "long double"
+ std::complex<float> std::complex<double> "std::complex<long double>")
+
+# Generate files with instantiations, external declarations, and also the
+# invocations in the test for each instance.
+dune_instance_begin(FILES standardtest.hh standardtest.cc)
+foreach(SCALAR IN LISTS TYPES)
+ dune_instance_add(ID "${SCALAR}" FILES standardtest_vector.cc)
+endforeach()
+dune_instance_end()
+
+list(FILTER DUNE_INSTANCE_GENERATED INCLUDE REGEX [[\.cc$]])
+dune_add_test(NAME standardtest
+ SOURCES ${DUNE_INSTANCE_GENERATED}
+ LINK_LIBRARIES dunecommon
+)
+# no need to install standardtest.hh, used by standardtest*.cc only
+
+
+# as of Vc-1.3.2: Vc/common/simdarray.h:561: SimdArray<T, N> may only be used
+# with T = { double, float, int32_t, uint32_t, int16_t, uint16_t }
+set(VCTEST_TYPES std::int16_t std::uint16_t std::int32_t std::uint32_t float double)
+
+# Generate files with instantiations, external declarations, and also the
+# invocations in the test for each instance.
+dune_instance_begin(FILES vcarraytest.hh vcarraytest.cc)
+foreach(SCALAR IN LISTS VCTEST_TYPES)
+ dune_instance_add(ID "${SCALAR}")
+ foreach(POINT IN ITEMS
+ Type
+ BinaryOpsScalarVector BinaryOpsVectorScalar
+ BinaryOpsProxyVector BinaryOpsVectorProxy)
+ dune_instance_add(TEMPLATE POINT ID "${POINT}_${SCALAR}"
+ FILES vctest_simdarray.cc vctest_simdmaskarray.cc)
+ endforeach()
+endforeach()
+dune_instance_end()
+list(FILTER DUNE_INSTANCE_GENERATED INCLUDE REGEX [[\.cc$]])
+dune_add_test(NAME vcarraytest
+ SOURCES ${DUNE_INSTANCE_GENERATED}
+ LINK_LIBRARIES dunecommon
+ CMAKE_GUARD Vc_FOUND
+)
+add_dune_vc_flags(vcarraytest)
+# no need to install vcarraytest.hh, used by vctest*.cc only
+
+# Generate files with instantiations, external declarations, and also the
+# invocations in the test for each instance.
+dune_instance_begin(FILES vcvectortest.hh vcvectortest.cc)
+foreach(SCALAR IN LISTS VCTEST_TYPES)
+ dune_instance_add(ID "${SCALAR}")
+ foreach(POINT IN ITEMS
+ Type
+ BinaryOpsScalarVector BinaryOpsVectorScalar
+ BinaryOpsProxyVector BinaryOpsVectorProxy)
+ dune_instance_add(TEMPLATE POINT ID "${POINT}_${SCALAR}"
+ FILES vctest_vector.cc vctest_mask.cc)
+ endforeach()
+endforeach()
+dune_instance_end()
+list(FILTER DUNE_INSTANCE_GENERATED INCLUDE REGEX [[\.cc$]])
+dune_add_test(NAME vcvectortest
+ SOURCES ${DUNE_INSTANCE_GENERATED}
+ LINK_LIBRARIES dunecommon
+ CMAKE_GUARD Vc_FOUND
+)
+add_dune_vc_flags(vcvectortest)
+# no need to install vcvectortest.hh, used by vctest*.cc only
--- /dev/null
+// @GENERATED_SOURCE@
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <cstdlib>
+#include <type_traits>
+
+#include <dune/common/simd/loop.hh>
+#include <dune/common/simd/test.hh>
+#include <dune/common/simd/test/looptest.hh>
+#include <dune/common/typetraits.hh>
+
+template<class> struct RebindAccept : std::false_type {};
+#cmake @template@
+template<std::size_t A>
+struct RebindAccept<Dune::LoopSIMD<@SCALAR@, 5, A> > : std::true_type {};
+template<std::size_t A1, std::size_t A2>
+struct RebindAccept<Dune::LoopSIMD<Dune::LoopSIMD<@SCALAR@, 2, A1>, 5, A2> > : std::true_type {};
+#cmake @endtemplate@
+
+using Rebinds = Dune::Simd::RebindList<
+#cmake @template@
+ @SCALAR@,
+#cmake @endtemplate@
+ Dune::Simd::EndMark>;
+
+int main()
+{
+ Dune::Simd::UnitTest test;
+
+#cmake @template@
+ test.check<Dune::LoopSIMD<@SCALAR@, 5>,
+ Rebinds, Dune::AlwaysFalse, RebindAccept>();
+ test.check<Dune::LoopSIMD<@SCALAR@, 5, 64>,
+ Rebinds, Dune::AlwaysFalse, RebindAccept>();
+ test.check<Dune::LoopSIMD<Dune::LoopSIMD<@SCALAR@, 2>, 5>,
+ Rebinds, Dune::AlwaysFalse, RebindAccept>();
+#cmake @endtemplate@
+
+ return test.good() ? EXIT_SUCCESS : EXIT_FAILURE;
+}
--- /dev/null
+// @GENERATED_SOURCE@
+
+#ifndef DUNE_COMMON_SIMD_TEST_LOOPTEST_HH
+#define DUNE_COMMON_SIMD_TEST_LOOPTEST_HH
+
+#include <complex> // as inserted by substitutions
+
+#include <dune/common/simd/test.hh>
+#include <dune/common/simd/loop.hh>
+
+namespace Dune {
+ namespace Simd {
+
+#cmake @template POINT@
+ extern template void
+ UnitTest::check@POINT@<LoopSIMD<@SCALAR@, 5> >();
+ extern template void
+ UnitTest::check@POINT@<LoopSIMD<@SCALAR@, 5, 64> >();
+ extern template void
+ UnitTest::check@POINT@<LoopSIMD<LoopSIMD<@SCALAR@, 2>, 5> >();
+#cmake @endtemplate@
+
+ } //namespace Simd
+} // namespace Dune
+
+#endif
--- /dev/null
+// @GENERATED_SOURCE@
+
+#include <config.h>
+
+#include <dune/common/simd/test/looptest.hh>
+
+namespace Dune {
+ namespace Simd {
+
+ template void UnitTest::check@POINT@<LoopSIMD<@SCALAR@, 5> >();
+ template void UnitTest::check@POINT@<LoopSIMD<@SCALAR@, 5, 64> >();
+ template void UnitTest::check@POINT@<LoopSIMD<LoopSIMD<@SCALAR@, 2>, 5> >();
+
+ } //namespace Simd
+} // namespace Dune
--- /dev/null
+// @GENERATED_SOURCE@
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <cstdlib>
+#include <type_traits>
+
+#include <dune/common/typetraits.hh>
+#include <dune/common/simd/test.hh>
+#include <dune/common/simd/test/standardtest.hh>
+
+template<class> struct RebindAccept : std::false_type {};
+#cmake @template@
+template<> struct RebindAccept<@SCALAR@> : std::true_type {};
+#cmake @endtemplate@
+
+using Rebinds = Dune::Simd::RebindList<
+#cmake @template@
+ @SCALAR@,
+#cmake @endtemplate@
+ Dune::Simd::EndMark>;
+
+int main()
+{
+ Dune::Simd::UnitTest test;
+
+#cmake @template@
+ test.check<@SCALAR@, Rebinds, Dune::AlwaysFalse, RebindAccept>();
+#cmake @endtemplate@
+
+ return test.good() ? EXIT_SUCCESS : EXIT_FAILURE;
+}
--- /dev/null
+// @GENERATED_SOURCE@
+
+#ifndef DUNE_COMMON_SIMD_TEST_STANDARDTEST_HH
+#define DUNE_COMMON_SIMD_TEST_STANDARDTEST_HH
+
+#include <complex> // for substituted types
+
+#include <dune/common/simd/test.hh>
+
+namespace Dune {
+ namespace Simd {
+
+#cmake @template@
+ extern template void UnitTest::checkType<@SCALAR@>();
+#cmake @endtemplate@
+
+ } // namespace Simd
+} // namespace Dune
+
+#endif // DUNE_COMMON_SIMD_TEST_STANDARDTEST_HH
--- /dev/null
+// @GENERATED_SOURCE@
+
+#include <config.h>
+
+#include <dune/common/simd/test.hh>
+#include <dune/common/simd/test/standardtest.hh>
+
+namespace Dune {
+ namespace Simd {
+
+ template void UnitTest::checkType<@SCALAR@>();
+
+ } // namespace Simd
+} // namespace Dune
--- /dev/null
+// @GENERATED_SOURCE@
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#if !HAVE_VC
+#error Inconsistent buildsystem. This program should not be built in the \
+ absence of Vc.
+#endif
+
+#include <cstddef>
+#include <cstdlib>
+#include <type_traits>
+
+#include <dune/common/simd/test.hh>
+#include <dune/common/simd/test/vcarraytest.hh>
+#include <dune/common/simd/vc.hh>
+#include <dune/common/typelist.hh>
+
+template<class T> struct RebindAccept : std::false_type {};
+#cmake @template@
+template<>
+struct RebindAccept<Vc::SimdArray<@SCALAR@, ::lanes> > : std::true_type {};
+template<>
+struct RebindAccept<Vc::SimdMaskArray<@SCALAR@, ::lanes> > : std::true_type {};
+#cmake @endtemplate@
+
+using Rebinds = Dune::TypeList<
+#cmake @template@
+ @SCALAR@,
+#cmake @endtemplate@
+ bool,
+ std::size_t>;
+
+int main()
+{
+ using Vc::Vector;
+ using Vc::SimdArray;
+
+ Dune::Simd::UnitTest test;
+
+#@template@
+ test.check<SimdArray<@SCALAR@, ::lanes>,
+ Rebinds, Dune::Simd::IsLoop, RebindAccept>();
+#@endtemplate@
+
+ return test.good() ? EXIT_SUCCESS : EXIT_FAILURE;
+}
--- /dev/null
+// @GENERATED_SOURCE@
+
+#ifndef DUNE_COMMON_SIMD_TEST_VCTEST_HH
+#define DUNE_COMMON_SIMD_TEST_VCTEST_HH
+
+#if HAVE_VC
+
+#include <cstddef>
+#include <cstdint> // for std::[u]int#_t as substituted
+
+#include <dune/common/simd/test.hh>
+#include <dune/common/simd/vc.hh>
+
+constexpr std::size_t lanes = 4;
+
+namespace Dune {
+ namespace Simd {
+
+#cmake @template POINT@
+ extern template void
+ UnitTest::check@POINT@<Vc::SimdArray<@SCALAR@, ::lanes> >();
+ extern template void
+ UnitTest::check@POINT@<Vc::SimdMaskArray<@SCALAR@, ::lanes> >();
+#cmake @endtemplate@
+
+ } // namespace Simd
+} // namespace Dune
+
+#endif // HAVE_VC
+#endif // DUNE_COMMON_SIMD_TEST_VCTEST_HH
--- /dev/null
+// @GENERATED_SOURCE@
+
+#include <config.h>
+
+#include <dune/common/simd/test/vcvectortest.hh>
+
+namespace Dune {
+ namespace Simd {
+
+ template void UnitTest::check@POINT@<Vc::Mask<@SCALAR@> >();
+
+ } // namespace Simd
+} // namespace Dune
--- /dev/null
+// @GENERATED_SOURCE@
+
+#include <config.h>
+
+#include <dune/common/simd/test/vcarraytest.hh>
+
+namespace Dune {
+ namespace Simd {
+
+ template void
+ UnitTest::check@POINT@<Vc::SimdArray<@SCALAR@, ::lanes> >();
+
+ } // namespace Simd
+} // namespace Dune
--- /dev/null
+// @GENERATED_SOURCE@
+
+#include <config.h>
+
+#include <dune/common/simd/test/vcarraytest.hh>
+
+namespace Dune {
+ namespace Simd {
+
+ template void
+ UnitTest::check@POINT@<Vc::SimdMaskArray<@SCALAR@, ::lanes> >();
+
+ } // namespace Simd
+} // namespace Dune
--- /dev/null
+// @GENERATED_SOURCE@
+
+#include <config.h>
+
+#include <dune/common/simd/test/vcvectortest.hh>
+
+namespace Dune {
+ namespace Simd {
+
+ template void UnitTest::check@POINT@<Vc::Vector<@SCALAR@> >();
+
+ } // namespace Simd
+} // namespace Dune
--- /dev/null
+// @GENERATED_SOURCE@
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#if !HAVE_VC
+#error Inconsistent buildsystem. This program should not be built in the \
+ absence of Vc.
+#endif
+
+#include <cstddef>
+#include <cstdlib>
+#include <type_traits>
+
+#include <dune/common/simd/test.hh>
+#include <dune/common/simd/test/vcvectortest.hh>
+#include <dune/common/simd/vc.hh>
+#include <dune/common/typelist.hh>
+
+template<class> struct RebindAccept : std::false_type {};
+#cmake @template@
+template<> struct RebindAccept<Vc::Vector<@SCALAR@> > : std::true_type {};
+template<> struct RebindAccept<Vc::Mask<@SCALAR@> > : std::true_type {};
+#cmake @endtemplate@
+
+// ignore rebinds to LoopSIMD as well as Vc::SimdArray
+template<class T> struct Prune : Dune::Simd::IsLoop<T> {};
+template<class T, std::size_t n, class V, std::size_t m>
+struct Prune<Vc::SimdArray<T, n, V, m> > : std::true_type {};
+
+using Rebinds = Dune::TypeList<
+#cmake @template@
+ @SCALAR@,
+#cmake @endtemplate@
+ bool,
+ std::size_t>;
+
+int main()
+{
+ using Vc::Vector;
+ using Vc::SimdArray;
+
+ Dune::Simd::UnitTest test;
+
+#@template@
+ test.check<Vector<@SCALAR@>, Rebinds, Prune, RebindAccept>();
+#@endtemplate@
+
+ return test.good() ? EXIT_SUCCESS : EXIT_FAILURE;
+}
--- /dev/null
+// @GENERATED_SOURCE@
+
+#ifndef DUNE_COMMON_SIMD_TEST_VCTEST_HH
+#define DUNE_COMMON_SIMD_TEST_VCTEST_HH
+
+#if HAVE_VC
+
+#include <cstdint>
+
+#include <dune/common/simd/test.hh>
+#include <dune/common/simd/vc.hh>
+#include <dune/common/typelist.hh>
+
+namespace Dune {
+ namespace Simd {
+
+#cmake @template POINT@
+ extern template void UnitTest::check@POINT@<Vc::Vector<@SCALAR@> >();
+ extern template void UnitTest::check@POINT@<Vc::Mask<@SCALAR@> >();
+#cmake @endtemplate@
+
+ } // namespace Simd
+} // namespace Dune
+
+#endif // HAVE_VC
+#endif // DUNE_COMMON_SIMD_TEST_VCTEST_HH
--- /dev/null
+#ifndef DUNE_COMMON_SIMD_VC_HH
+#define DUNE_COMMON_SIMD_VC_HH
+
+/** @file
+ * @ingroup SIMDVc
+ * @brief SIMD abstractions for Vc
+ */
+
+#include <cstddef>
+#include <type_traits>
+#include <utility>
+
+#include <dune/common/indices.hh>
+#include <dune/common/simd/base.hh>
+#include <dune/common/simd/defaults.hh> // for anyFalse()
+#include <dune/common/simd/loop.hh>
+#include <dune/common/typetraits.hh>
+#include <dune/common/vc.hh>
+
+/** @defgroup SIMDVc SIMD Abstraction Implementation for Vc
+ * @ingroup SIMDApp
+ *
+ * This implements the vectorization interface for Vc types, namely
+ * `Vc::Vector`, `Vc::Mask`, `Vc::SimdArray` and `Vc::SimdMask`.
+ *
+ * As an application developer, you need to `#include
+ * <dune/common/simd/vc.hh>`. You need to make sure that `HAVE_VC` is true
+ * before doing so:
+ *
+ * - If your program works both in the presence and the absence of Vc, wrap
+ *   the include in `#if HAVE_VC` and `#endif` (see the sketch after this list)
+ *
+ * - If you write a unit test, in your `CMakeLists.txt` use
+ * `dune_add_test(... CMAKE_GUARD Vc_FOUND)`
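+ *
+ * For the first case, the guarded include might look like this (a minimal
+ * sketch, assuming nothing beyond what is described above):
+ * \code
+ * #if HAVE_VC
+ * #include <dune/common/simd/vc.hh>
+ * #endif
+ * \endcode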
+ *
+ * You also need to make sure that the compiler uses the correct flags and
+ * that the linker can find the library. (The compilation flags include one
+ * that ensures a name-mangling scheme is used that can distinguish the
+ * compiler-intrinsic vector types from non-vector types.)
+ *
+ * - Either use `add_dune_vc_flags(your_application)` in `CMakeLists.txt`,
+ *
+ * - or use `dune_enable_all_packages()` in your module's toplevel
+ * `CMakeLists.txt`.
+ *
+ * There should be no need to explicitly call `find_package(Vc)` in your
+ * `CMakeLists.txt`, dune-common already does that. If your module can't live
+ * without Vc, you may however want to do something like this in your
+ * `cmake/modules/YourModuleMacros.cmake`:
+ * \code
+ * if(NOT Vc_FOUND)
+ * message(SEND_ERROR "This module requires Vc")
+ * endif(NOT Vc_FOUND)
+ * \endcode
+ *
+ * If you just want to compile dune, and have Vc installed to a location where
+ * cmake is not looking by default, you need to add that location to
+ * `CMAKE_PREFIX_PATH`. E.g. pass `-DCMAKE_PREFIX_PATH=$VCDIR` to cmake, for
+ * instance by including that in the variable `CMAKE_FLAGS` in the options
+ * file that you pass to dunecontrol. `$VCDIR` should be the same directory
+ * that you passed in `-DCMAKE_INSTALL_PREFIX=...` to cmake when you compiled
+ * Vc, i.e. Vc's main include file should be found under
+ * `$VCDIR/include/Vc/Vc`, and the library under `$VCDIR/lib/libVc.a` or
+ * similar.
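+ *
+ * As a sketch, an options file passed to dunecontrol might then contain a
+ * line like the following (with `$VCDIR` adapted to your installation
+ * prefix):
+ * \code
+ * CMAKE_FLAGS="-DCMAKE_PREFIX_PATH=$VCDIR"
+ * \endcode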
+ *
+ * @section SIMDVcRestrictions Restrictions
+ *
+ * During thorough testing of the Vc abstraction implementation it turned out
+ * that certain operations were not supported, or were buggy. This meant that
+ * the tests had to be relaxed, and/or some restrictions had to be made as to how
+ * things must be done through the SIMD abstraction, see @ref
+ * simd_abstraction_limit.
+ *
+ * For future reference, here is a detailed list of things that certain Vc
+ * types do or don't support. `s` denotes a scalar object/expression (i.e. of
+ * type `double` or in the case of masks `bool`). `v` denotes a vector/mask
+ * object/expression. `sv` means that both scalar and vector arguments are
+ * accepted. `V` denotes a vector/mask type. `@` means any applicable
+ * operator that is not otherwise listed.
+ *
+ * <!-- The following table is in orgtbl format -- If you are using emacs, you
+ * may want to enable the `orgtbl` minor mode. We substitute `|` with
+ * `¦` when describing or-operators so as to not confuse orgtbl. -->
+ * \code
+ | | Vector | Vector | SimdArray | SimdArray | Masks[4] |
+ | | <double> AVX | <int> SSE | <double,4> | <int,4> | |
+ |-------------------------+--------------+-----------+------------+-----------+-----------|
+ | V v(s); | y | y | y | y | y |
+ | V v = s; | y | y | y | y | *N* |
+ | V v{s}; | *N* | y | *N* | *N* | y |
+ | V v = {s}; | *N* | y | *N* | *N* | *N* |
+ |-------------------------+--------------+-----------+------------+-----------+-----------|
+ | v = s; | y | y | y | y | *N* |
+ | v = {s}; | *N* | *N* | *N* | *N* | *N* |
+ |-------------------------+--------------+-----------+------------+-----------+-----------|
+ | v++; ++v; | y | y | *N* | *N* | y(n/a)[2] |
+ | v--; --v; | y | y | *N* | *N* | n/a |
+ |-------------------------+--------------+-----------+------------+-----------+-----------|
+ | +v; -v; | y | y | y | y | *N* |
+ | !v; | y | y | y | y | y |
+ | ~v; | n/a | y | n/a | y | *N* |
+ |-------------------------+--------------+-----------+------------+-----------+-----------|
+ | sv @ sv; but see below | y | y | y | y | *N* |
+ |-------------------------+--------------+-----------+------------+-----------+-----------|
+ | s << v; s >> v; | n/a | *N* | n/a | *N* | *N* |
+ |-------------------------+--------------+-----------+------------+-----------+-----------|
+ | v == v; v != v; | y | y | y | y | *N* [1] |
+ |-------------------------+--------------+-----------+------------+-----------+-----------|
+ | v & v; v ^ v; v ¦ v; | n/a | y | n/a | y | y |
+ | sv && sv; sv ¦¦ sv; | y | y | *N* | *N* | *N* |
+ | v && v; v ¦¦ v; | y | y | *N* | *N* | y |
+ |-------------------------+--------------+-----------+------------+-----------+-----------|
+ | v @= sv; but see below | y | y | y | y | *N* |
+ | v &= v; v ^= v; v ¦= v; | n/a | y | n/a | y | y |
+ |-------------------------+--------------+-----------+------------+-----------+-----------|
+ | v, v;[3] | *N* | *N* | y | y | y |
+ * \endcode
+ *
+ * Notes:
+ *
+ * - [1] The result of the mask-mask `==` and `!=` operation is a scalar.
+ *
+ * - [2] `++` (either kind) on bools is deprecated by the standard
+ *
+ * - [3] contrary to the other operators, the expected result for `(sv1, sv2)`
+ * is exactly `sv2`, no broadcasting applied.
+ *
+ * - [4] Checked with `Vector<int>::Mask` [SSE] and `SimdArray<int, 4>::Mask`,
+ *   which behaved identically
+ *
+ * Support levels:
+ *
+ * - `y`: operation generally works; some instances of the operation may not
+ * apply
+ *
+ * - `*N*`: operation generally does not work; some instances of the operation
+ * may not apply
+ *
+ * - `n/a`: operation does not apply (i.e. bitwise operations to
+ * floating-point operands, `--` (and in the future possibly `++`) to
+ * boolean operands, assignment operators to scalar left hand sides)
+ *
+ * Each operation was tested with the full set of combinations of possible
+ * `const`/non-`const` lvalue/xvalue arguments. Each combination of constness
+ * and value category was applied to the scalar type and the operation tried
+ * in an SFINAE context; combinations that failed here were skipped for vector
+ * arguments too.
+ */
+
+namespace Dune {
+ namespace Simd {
+
+ namespace VcImpl {
+
+ //! specialized to true for Vc mask types
+ template<class V, class SFINAE = void>
+ struct IsMask : std::false_type {};
+
+ template<typename T, typename A>
+ struct IsMask<Vc::Mask<T, A> > : std::true_type {};
+
+ template<typename T, std::size_t n, typename V, std::size_t m>
+ struct IsMask<Vc::SimdMaskArray<T, n, V, m> > : std::true_type {};
+
+ //! specialized to true for Vc vector and mask types
+ template<class V, class SFINAE = void>
+ struct IsVector : IsMask<V> {};
+
+ template<typename T, typename A>
+ struct IsVector<Vc::Vector<T, A> > : std::true_type {};
+
+ template<typename T, std::size_t n, typename V, std::size_t m>
+ struct IsVector<Vc::SimdArray<T, n, V, m> > : std::true_type {};
+
+ template<typename T> struct IsVectorizable : std::false_type {};
+ template<> struct IsVectorizable<double> : std::true_type {};
+ template<> struct IsVectorizable<float> : std::true_type {};
+ template<> struct IsVectorizable<std::int32_t> : std::true_type {};
+ template<> struct IsVectorizable<std::uint32_t> : std::true_type {};
+ template<> struct IsVectorizable<std::int16_t> : std::true_type {};
+ template<> struct IsVectorizable<std::uint16_t> : std::true_type {};
+
+ //! A reference-like proxy for elements of random-access vectors.
+ /**
+ * This is necessary because Vc's lane-access operations return a proxy
+ * that cannot be constructed by non-Vc code (i.e. code that isn't
+ * explicitly declared `friend`). This means in particular that there
+ * is no copy/move constructor, meaning we cannot return such proxies
+ * from our own functions, such as `lane()`. To work around this, we
+ * define our own proxy class which internally holds a reference to the
+ * vector and a lane index.
+ *
+ * Note: this should be unnecessary with C++17, as just returning a
+ * temporary object should not involve copying it.
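+ *
+ * As a usage sketch (assuming a Vc-enabled build and the `Simd::lane()`
+ * interface function), accessing a lane of a mutable Vc vector goes
+ * through such a proxy:
+ * \code
+ *   Vc::Vector<double> v;
+ *   Dune::Simd::lane(0, v) = 42.;        // assign through the proxy
+ *   double x = Dune::Simd::lane(0, v);   // read back via conversion
+ * \endcode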
+ */
+ template<class V>
+ class Proxy
+ {
+ static_assert(std::is_same<V, std::decay_t<V> >::value, "Class Proxy "
+ "may only be instantiated with unqualified types");
+ public:
+ using value_type = typename V::value_type;
+
+ private:
+ static_assert(std::is_arithmetic<value_type>::value,
+ "Only artihmetic types are supported");
+ V &vec_;
+ std::size_t idx_;
+
+ public:
+ Proxy(std::size_t idx, V &vec)
+ : vec_(vec), idx_(idx)
+ { }
+
+ Proxy(const Proxy&) = delete;
+ // allow move construction so we can return proxies from functions
+ Proxy(Proxy&&) = default;
+
+ operator value_type() const { return vec_[idx_]; }
+
+ // assignment operators
+#define DUNE_SIMD_VC_ASSIGNMENT(OP) \
+ template<class T, \
+ class = decltype(std::declval<value_type&>() OP \
+ autoCopy(std::declval<T>()) )> \
+ Proxy operator OP(T &&o) && \
+ { \
+ vec_[idx_] OP autoCopy(std::forward<T>(o)); \
+ return { idx_, vec_ }; \
+ }
+ DUNE_SIMD_VC_ASSIGNMENT(=);
+ DUNE_SIMD_VC_ASSIGNMENT(*=);
+ DUNE_SIMD_VC_ASSIGNMENT(/=);
+ DUNE_SIMD_VC_ASSIGNMENT(%=);
+ DUNE_SIMD_VC_ASSIGNMENT(+=);
+ DUNE_SIMD_VC_ASSIGNMENT(-=);
+ DUNE_SIMD_VC_ASSIGNMENT(<<=);
+ DUNE_SIMD_VC_ASSIGNMENT(>>=);
+ DUNE_SIMD_VC_ASSIGNMENT(&=);
+ DUNE_SIMD_VC_ASSIGNMENT(^=);
+ DUNE_SIMD_VC_ASSIGNMENT(|=);
+#undef DUNE_SIMD_VC_ASSIGNMENT
+
+ // unary (prefix) operators
+ template<class T = value_type,
+ class = std::enable_if_t<!std::is_same<T, bool>::value> >
+ Proxy operator++() { ++(vec_[idx_]); return *this; }
+ template<class T = value_type,
+ class = std::enable_if_t<!std::is_same<T, bool>::value> >
+ Proxy operator--() { --(vec_[idx_]); return *this; }
+
+ // postfix operators
+ template<class T = value_type,
+ class = std::enable_if_t<!std::is_same<T, bool>::value> >
+ value_type operator++(int) { return vec_[idx_]++; }
+ template<class T = value_type,
+ class = std::enable_if_t<!std::is_same<T, bool>::value> >
+ value_type operator--(int) { return vec_[idx_]--; }
+
+
+ // swap on proxies swaps the proxied vector entries. As such, it
+ // applies to rvalues of proxies too, not just lvalues
+ friend void swap(const Proxy &a, const Proxy &b) {
+ // don't use swap() ourselves -- not supported by Vc 1.3.0 (but is
+ // supported by Vc 1.3.2)
+ value_type tmp = std::move(a.vec_[a.idx_]);
+ a.vec_[a.idx_] = std::move(b.vec_[b.idx_]);
+ b.vec_[b.idx_] = std::move(tmp);
+ }
+ friend void swap(value_type &a, const Proxy &b) {
+ // don't use swap() ourselves -- not supported by Vc 1.3.0 (but is
+ // supported by Vc 1.3.2)
+ value_type tmp = std::move(a);
+ a = std::move(b.vec_[b.idx_]);
+ b.vec_[b.idx_] = std::move(tmp);
+ }
+ friend void swap(const Proxy &a, value_type &b) {
+ // don't use swap() ourselves -- not supported by Vc 1.3.0 (but is
+ // supported by Vc 1.3.2)
+ value_type tmp = std::move(a.vec_[a.idx_]);
+ a.vec_[a.idx_] = std::move(b);
+ b = std::move(tmp);
+ }
+
+ // binary operators
+ //
+ // Normally, these are provided by the conversion operator in
+ // combination with C++'s builtin binary operators. Other classes
+ // that need to provide the binary operators themselves should either
+ // 1. deduce the "foreign" operand type independently, i.e. use
+ // template<class... Args, class Foreign>
+ // auto operator@(MyClass<Args...>, Foreign);
+ // or
+ // 2. not deduce anything from the foreign argument, i.e.
+ // template<class... Args>
+ // auto operator@(MyClass<Args...>,
+ // typename MyClass<Args...>::value_type);
+ // or
+ // template<class T, class... Args>
+ // struct MyClass {
+ // auto operator@(T);
+ // }
+ // or
+ // template<class T, class... Args>
+ // struct MyClass {
+ // friend auto operator@(MyClass, T);
+ // }
+ //
+ // This allows either for an exact match (in the case of option 1.) or
+ // for conversions to be applied to the foreign argument (options 2.).
+ // In contrast, allowing some of the template parameters that are deduced
+ // from the self argument to also be deduced from the foreign argument
+ // will likely lead to ambiguous deduction when the foreign argument is
+ // a proxy:
+ // template<class T, class... Args>
+ // auto operator@(MyClass<T, Args...>, T);
+ // One class that suffers from this problem is std::complex.
+ //
+ // Note that option 1. is a bit dangerous, as the foreign argument is a
+ // catch-all. This seems tempting in the case of a proxy class, as
+ // the operator could just be forwarded to the proxied object with the
+ // foreign argument unchanged, immediately creating interoperability
+ // with arbitrary foreign classes. However, if the foreign class also
+ // chooses option 1., this will result in ambiguous overloads, and there
+ // is no clear guide to decide which class should provide the overload
+ // and which should not.
+ //
+ // Fortunately, deferring to the conversion and the built-in operators
+ // mostly works in the case of this proxy class, because only built-in
+ // types can be proxied anyway. Unfortunately, the Vc vectors and
+ // arrays suffer from a slightly different problem. They chose option
+ // 1., but they can't just accept the argument type they are given,
+ // since they need to somehow implement the operation in terms of
+ // intrinsics. So they check the argument whether it is one of the
+ // expected types, and remove the operator from the overload set if it
+ // isn't via SFINAE. Of course, this proxy class is not one of the
+ // expected types, even though it would convert to them...
+ //
+ // So what we have to do here, unfortunately, is to provide operators
+ // for the Vc types explicitly, and hope that there won't be some Vc
+ // version that gets the operators right, thus creating ambiguous
+ // overloads. Well, I guess it will be #ifdef time if it comes to
+ // that.
+#define DUNE_SIMD_VC_BINARY(OP) \
+ template<class T, class Abi> \
+ friend auto operator OP(const Vc::Vector<T, Abi> &l, Proxy&& r) \
+ -> decltype(l OP std::declval<value_type>()) \
+ { \
+ return l OP value_type(r); \
+ } \
+ template<class T, class Abi> \
+ auto operator OP(const Vc::Vector<T, Abi> &r) && \
+ -> decltype(std::declval<value_type>() OP r) \
+ { \
+ return value_type(*this) OP r; \
+ } \
+ template<class T, std::size_t n, class Vec, std::size_t m> \
+ friend auto \
+ operator OP(const Vc::SimdArray<T, n, Vec, m> &l, Proxy&& r) \
+ -> decltype(l OP std::declval<value_type>()) \
+ { \
+ return l OP value_type(r); \
+ } \
+ template<class T, std::size_t n, class Vec, std::size_t m> \
+ auto operator OP(const Vc::SimdArray<T, n, Vec, m> &r) && \
+ -> decltype(std::declval<value_type>() OP r) \
+ { \
+ return value_type(*this) OP r; \
+ }
+
+ DUNE_SIMD_VC_BINARY(*);
+ DUNE_SIMD_VC_BINARY(/);
+ DUNE_SIMD_VC_BINARY(%);
+ DUNE_SIMD_VC_BINARY(+);
+ DUNE_SIMD_VC_BINARY(-);
+ DUNE_SIMD_VC_BINARY(<<);
+ DUNE_SIMD_VC_BINARY(>>);
+ DUNE_SIMD_VC_BINARY(&);
+ DUNE_SIMD_VC_BINARY(^);
+ DUNE_SIMD_VC_BINARY(|);
+ DUNE_SIMD_VC_BINARY(<);
+ DUNE_SIMD_VC_BINARY(>);
+ DUNE_SIMD_VC_BINARY(<=);
+ DUNE_SIMD_VC_BINARY(>=);
+ DUNE_SIMD_VC_BINARY(==);
+ DUNE_SIMD_VC_BINARY(!=);
+#undef DUNE_SIMD_VC_BINARY
+
+ // this is needed to implement broadcast construction from proxy as
+ // the unadorned assignment operator cannot be a non-member
+ template<class T, class Abi,
+ class = std::enable_if_t<std::is_convertible<value_type,
+ T>::value> >
+ operator Vc::Vector<T, Abi>() &&
+ {
+ return value_type(*this);
+ }
+ template<class T, std::size_t n, class Vec, std::size_t m,
+ class = std::enable_if_t<std::is_convertible<value_type,
+ T>::value> >
+ operator Vc::SimdArray<T, n, Vec, m>() &&
+ {
+ return value_type(*this);
+ }
+
+#define DUNE_SIMD_VC_ASSIGN(OP) \
+ template<class T, class Abi> \
+ friend auto operator OP(Vc::Vector<T, Abi> &l, Proxy&& r) \
+ -> decltype(l OP std::declval<value_type>()) \
+ { \
+ return l OP value_type(r); \
+ }
+
+ DUNE_SIMD_VC_ASSIGN(*=);
+ DUNE_SIMD_VC_ASSIGN(/=);
+ DUNE_SIMD_VC_ASSIGN(%=);
+ DUNE_SIMD_VC_ASSIGN(+=);
+ DUNE_SIMD_VC_ASSIGN(-=);
+ DUNE_SIMD_VC_ASSIGN(&=);
+ DUNE_SIMD_VC_ASSIGN(^=);
+ DUNE_SIMD_VC_ASSIGN(|=);
+ // The shift assignment would not be needed for Vc::Vector since it
+ // has overloads for `int` rhs and the proxy can convert to that --
+ // except that there are also overloads for Vector, and because of the
+ // conversion operator needed to support unadorned assignments, the
+ // proxy can convert to that, too.
+ DUNE_SIMD_VC_ASSIGN(<<=);
+ DUNE_SIMD_VC_ASSIGN(>>=);
+#undef DUNE_SIMD_VC_ASSIGN
+ };
+
+ } // namespace VcImpl
+
+ namespace Overloads {
+
+ /** @name Specialized classes and overloaded functions
+ * @ingroup SIMDVc
+ * @{
+ */
+
+ //! should have a member type \c type
+ /**
+ * Implements Simd::Scalar
+ */
+ template<class V>
+ struct ScalarType<V, std::enable_if_t<VcImpl::IsVector<V>::value> >
+ {
+ using type = typename V::value_type;
+ };
+
+ //! should have a member type \c type
+ /**
+ * Implements Simd::Rebind
+ *
+ * This specialization covers
+ * - Mask -> bool
+ * - Vector -> Scalar<Vector>
+ */
+ template<class V>
+ struct RebindType<Simd::Scalar<V>, V,
+ std::enable_if_t<VcImpl::IsVector<V>::value> >
+ {
+ using type = V;
+ };
+
+ //! should have a member type \c type
+ /**
+ * Implements Simd::Rebind
+ *
+ * This specialization covers
+ * - Vector -> bool
+ */
+ template<class V>
+ struct RebindType<bool, V, std::enable_if_t<VcImpl::IsVector<V>::value &&
+ !VcImpl::IsMask<V>::value>>
+ {
+ using type = typename V::mask_type;
+ };
+
+ //! should have a member type \c type
+ /**
+ * Implements Simd::Rebind
+ *
+ * This specialization covers
+ * - Mask -> Scalar<Mask::Vector>
+ */
+ template<class M>
+ struct RebindType<Scalar<typename M::Vector>, M,
+ std::enable_if_t<VcImpl::IsMask<M>::value>>
+ {
+ using type = typename M::Vector;
+ };
+
+ //! should have a member type \c type
+ /**
+ * Implements Simd::Rebind
+ *
+ * This specialization covers
+ * - Mask -> Vc-vectorizable type except bool, Scalar<Mask::Vector>
+ */
+ template<class S, class M>
+ struct RebindType<S, M,
+ std::enable_if_t<
+ VcImpl::IsMask<M>::value &&
+ VcImpl::IsVectorizable<S>::value &&
+ !std::is_same<S, Scalar<typename M::Vector> >::value
+ > >
+ {
+ using type = Vc::SimdArray<S, Simd::lanes<M>()>;
+ };
+
+ //! should have a member type \c type
+ /**
+ * Implements Simd::Rebind
+ *
+ * This specialization covers
+ * - Vector -> Vc-vectorizable type except bool, Scalar<Vector>
+ */
+ template<class S, class V>
+ struct RebindType<S, V,
+ std::enable_if_t<VcImpl::IsVector<V>::value &&
+ !VcImpl::IsMask<V>::value &&
+ VcImpl::IsVectorizable<S>::value &&
+ !std::is_same<S, Scalar<V> >::value> >
+ {
+ using type = Vc::SimdArray<S, Simd::lanes<V>()>;
+ };
+
+ //! should have a member type \c type
+ /**
+ * Implements Simd::Rebind
+ *
+ * This specialization covers
+ * - Mask -> non-Vc-vectorizable type except bool
+ * - Vector -> non-Vc-vectorizable type except bool
+ */
+ template<class S, class V>
+ struct RebindType<S, V,
+ std::enable_if_t<VcImpl::IsVector<V>::value &&
+ !VcImpl::IsVectorizable<S>::value &&
+ !std::is_same<S, bool>::value &&
+ !std::is_same<S, Scalar<V> >::value> >
+ {
+ using type = LoopSIMD<S, Simd::lanes<V>()>;
+ };
+
+ //! should be derived from a Dune::index_constant
+ /**
+ * Implements Simd::lanes()
+ */
+ template<class V>
+ struct LaneCount<V, std::enable_if_t<VcImpl::IsVector<V>::value> >
+ : public index_constant<V::size()>
+ { };
+
+ //! implements Simd::lane()
+ template<class V>
+ VcImpl::Proxy<V> lane(ADLTag<5, VcImpl::IsVector<V>::value>,
+ std::size_t l, V &v)
+ {
+ return { l, v };
+ }
+
+ //! implements Simd::lane()
+ template<class V>
+ Scalar<V> lane(ADLTag<5, VcImpl::IsVector<V>::value>,
+ std::size_t l, const V &v)
+ {
+ return v[l];
+ }
+
+ //! implements Simd::lane()
+ /*
+ * The hack with the SFINAE is necessary, because if I use just
+ * Scalar<V> as the return type, the compiler still errors out if V is
+ * an lvalue-reference T&. You'd think it would notice that it can't
+ * instantiate this declaration for this template parameter, and would
+ * simply remove it from the overload set, but no...
+ */
+ template<class V,
+ class = std::enable_if_t<!std::is_reference<V>::value> >
+ Scalar<V> lane(ADLTag<5, VcImpl::IsVector<V>::value>,
+ std::size_t l, V &&v)
+ {
+ return std::forward<V>(v)[l];
+ }
+
+ //! implements Simd::cond()
+ template<class V>
+ V cond(ADLTag<5, VcImpl::IsVector<V>::value &&
+ !VcImpl::IsMask<V>::value>,
+ const Mask<V> &mask, const V &ifTrue, const V &ifFalse)
+ {
+ return Vc::iif(mask, ifTrue, ifFalse);
+ }
+
+ //! implements Simd::cond()
+ /*
+ * Kludge because iif seems to be unimplemented for masks
+ */
+ template<class V>
+ V cond(ADLTag<5, VcImpl::IsMask<V>::value>,
+ const V &mask, const V &ifTrue, const V &ifFalse)
+ {
+ return (mask && ifTrue) || (!mask && ifFalse);
+ }
+
+ //! implements binary Simd::max()
+ template<class V>
+ auto max(ADLTag<5, VcImpl::IsVector<V>::value &&
+ !VcImpl::IsMask<V>::value>,
+ const V &v1, const V &v2)
+ {
+ return Simd::cond(v1 < v2, v2, v1);
+ }
+
+ //! implements binary Simd::max()
+ template<class M>
+ auto max(ADLTag<5, VcImpl::IsMask<M>::value>,
+ const M &m1, const M &m2)
+ {
+ return m1 || m2;
+ }
+
+ //! implements binary Simd::min()
+ template<class V>
+ auto min(ADLTag<5, VcImpl::IsVector<V>::value &&
+ !VcImpl::IsMask<V>::value>,
+ const V &v1, const V &v2)
+ {
+ return Simd::cond(v1 < v2, v1, v2);
+ }
+
+ //! implements binary Simd::min()
+ template<class M>
+ auto min(ADLTag<5, VcImpl::IsMask<M>::value>,
+ const M &m1, const M &m2)
+ {
+ return m1 && m2;
+ }
+
+ //! implements Simd::anyTrue()
+ template<class M>
+ bool anyTrue (ADLTag<5, VcImpl::IsMask<M>::value>, const M &mask)
+ {
+ return Vc::any_of(mask);
+ }
+
+ //! implements Simd::allTrue()
+ template<class M>
+ bool allTrue (ADLTag<5, VcImpl::IsMask<M>::value>, const M &mask)
+ {
+ return Vc::all_of(mask);
+ }
+
+ // nothing like anyFalse() in Vc, so let defaults.hh handle it
+
+ //! implements Simd::allFalse()
+ template<class M>
+ bool allFalse(ADLTag<5, VcImpl::IsMask<M>::value>, const M &mask)
+ {
+ return Vc::none_of(mask);
+ }
+
+ //! implements Simd::maxValue()
+ template<class V>
+ auto max(ADLTag<5, VcImpl::IsVector<V>::value &&
+ !VcImpl::IsMask<V>::value>,
+ const V &v)
+ {
+ return v.max();
+ }
+
+ //! implements Simd::maxValue()
+ template<class M>
+ bool max(ADLTag<5, VcImpl::IsMask<M>::value>, const M &mask)
+ {
+ return Vc::any_of(mask);
+ }
+
+ //! implements Simd::minValue()
+ template<class V>
+ auto min(ADLTag<5, VcImpl::IsVector<V>::value &&
+ !VcImpl::IsMask<V>::value>,
+ const V &v)
+ {
+ return v.min();
+ }
+
+ //! implements Simd::minValue()
+ template<class M>
+ bool min(ADLTag<5, VcImpl::IsMask<M>::value>, const M &mask)
+ {
+ return !Vc::any_of(!mask);
+ }
+
+ //! implements Simd::maskAnd()
+ template<class S1, class V2>
+ auto maskAnd(ADLTag<5, std::is_same<Mask<S1>, bool>::value &&
+ VcImpl::IsVector<V2>::value>,
+ const S1 &s1, const V2 &v2)
+ {
+ return Simd::Mask<V2>(Simd::mask(s1)) && Simd::mask(v2);
+ }
+
+ //! implements Simd::maskAnd()
+ template<class V1, class S2>
+ auto maskAnd(ADLTag<5, VcImpl::IsVector<V1>::value &&
+ std::is_same<Mask<S2>, bool>::value>,
+ const V1 &v1, const S2 &s2)
+ {
+ return Simd::mask(v1) && Simd::Mask<V1>(Simd::mask(s2));
+ }
+
+ //! implements Simd::maskOr()
+ template<class S1, class V2>
+ auto maskOr(ADLTag<5, std::is_same<Mask<S1>, bool>::value &&
+ VcImpl::IsVector<V2>::value>,
+ const S1 &s1, const V2 &v2)
+ {
+ return Simd::Mask<V2>(Simd::mask(s1)) || Simd::mask(v2);
+ }
+
+ //! implements Simd::maskOr()
+ template<class V1, class S2>
+ auto maskOr(ADLTag<5, VcImpl::IsVector<V1>::value &&
+ std::is_same<Mask<S2>, bool>::value>,
+ const V1 &v1, const S2 &s2)
+ {
+ return Simd::mask(v1) || Simd::Mask<V1>(Simd::mask(s2));
+ }
+
+ //! @} group SIMDVc
+
+ } // namespace Overloads
+
+ } // namespace Simd
+
+ /*
+ * Specialize IsNumber for Vc::SimdArray and Vc::Vector to be able to use
+ * it as a scalar in DenseMatrix etc.
+ */
+ template <typename T, std::size_t N>
+ struct IsNumber<Vc::SimdArray<T, N>>
+ : public std::integral_constant<bool, IsNumber<T>::value> {
+ };
+
+ template <typename T, typename Abi>
+ struct IsNumber<Vc::Vector<T, Abi>>
+ : public std::integral_constant<bool, IsNumber<T>::value> {
+ };
+
+ //! Specialization of AutonomousValue for Vc proxies
+ template<class V>
+ struct AutonomousValueType<Simd::VcImpl::Proxy<V> > :
+ AutonomousValueType<typename Simd::VcImpl::Proxy<V>::value_type> {};
+
+} // namespace Dune
+
+#endif // DUNE_COMMON_SIMD_VC_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_SINGLETON_HH
+#define DUNE_SINGLETON_HH
+
+#include <dune/common/visibility.hh>
+
+/**
+ * @file
+ * @brief Useful wrapper for creating singletons.
+ *
+ * Inspired by the article
+ * <a href="http://www.codeguru.com/cpp/cpp/cpp_mfc/singletons/article.php/c755/">CodeGuru: A Leak-Free Singleton class</a>
+ */
+namespace Dune
+{
+ /**
+ * @brief An adapter to turn a class into a singleton.
+ *
+ * The class represented by the template parameter T must
+ * have a parameterless constructor.
+ *
+ * Class T can be publicly derived from Singleton<T>:
+ *
+ * \code
+ * #include<dune/common/singleton.hh>
+ * class Foo : public Dune::Singleton<Foo>
+ * {
+ * public:
+ * Foo()
+ * {
+ * bytes = new char[1000];
+ * }
+ *
+ * ~Foo()
+ * {
+ * delete[] bytes;
+ * }
+ * private:
+ * char* bytes;
+ * };
+ * \endcode
+ *
+ * Alternatively, one can construct a singleton of an existing class. Say Foo1 is a class
+ * with a parameterless constructor; then
+ * \code
+ * typedef Dune::Singleton<Foo1> FooSingleton;
+ * Foo1& instance = FooSingleton::instance();
+ * \endcode
+ * creates a singleton of that class and accesses its instance.
+ */
+ template<class T>
+ class Singleton
+ {
+ protected:
+ /** @brief Protected constructor. */
+ Singleton() = default;
+
+ public:
+
+ Singleton(const Singleton&) = delete;
+ void operator=(const Singleton&) = delete;
+
+ /**
+ * @brief Get the instance of the singleton.
+ * @return The instance of the singleton.
+ */
+ DUNE_EXPORT static T& instance()
+ {
+ static T instance_;
+ return instance_;
+ }
+ };
+
+} // namespace Dune
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_SLLIST_HH
+#define DUNE_SLLIST_HH
+
+#include <memory>
+#include <cassert>
+#include "iteratorfacades.hh"
+#include <ostream>
+
+namespace Dune
+{
+ /**
+ * @addtogroup Common
+ *
+ * @{
+ */
+ /**
+ * @file
+ * \brief Implements a singly linked list together with
+ * the necessary iterators.
+ * @author Markus Blatt
+ */
+ template<typename T, class A>
+ class SLListIterator;
+
+ template<typename T, class A>
+ class SLListConstIterator;
+
+ template<typename T, class A>
+ class SLListModifyIterator;
+
+ /**
+ * @brief A singly linked list.
+ *
+ * The list is capable of insertions at the front and at
+ * the end and of removing elements at the front. Those
+ * operations require constant time.
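+ *
+ * A minimal usage sketch (element type and values are only illustrative):
+ * \code
+ * #include <iostream>
+ * #include <dune/common/sllist.hh>
+ *
+ * Dune::SLList<int> list;
+ * list.push_back(1);   // list: 1
+ * list.push_back(2);   // list: 1 2
+ * list.push_front(0);  // list: 0 1 2
+ * list.pop_front();    // list: 1 2
+ * for(auto it = list.begin(); it != list.end(); ++it)
+ *   std::cout << *it << std::endl;
+ * \endcode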
+ */
+ template<typename T, class A=std::allocator<T> >
+ class SLList
+ {
+ struct Element;
+ friend class SLListIterator<T,A>;
+ friend class SLListConstIterator<T,A>;
+
+ public:
+
+ /**
+ * @brief The size type.
+ */
+ typedef typename std::allocator_traits<A>::size_type size_type;
+
+ /**
+ * @brief The type we store.
+ */
+ typedef T MemberType;
+
+ /**
+ * @brief The allocator to use.
+ */
+ using Allocator = typename std::allocator_traits<A>::template rebind_alloc<Element>;
+
+ /**
+ * @brief The mutable iterator of the list.
+ */
+ typedef SLListIterator<T,A> iterator;
+
+ /**
+ * @brief The constant iterator of the list.
+ */
+ typedef SLListConstIterator<T,A> const_iterator;
+
+ /**
+ * @brief Constructor.
+ */
+ SLList();
+
+ /**
+ * @brief Copy constructor with type conversion.
+ */
+ template<typename T1, typename A1>
+ SLList(const SLList<T1,A1>& other);
+
+ /**
+ * @brief Copy constructor.
+ */
+ SLList(const SLList<T,A>& other);
+
+ /**
+ * @brief Destructor.
+ *
+ * Deallocates all elements in the list.
+ */
+ ~SLList();
+
+ /**
+ * @brief The type of the iterator capable of deletion
+ * and insertion.
+ */
+ typedef SLListModifyIterator<T,A> ModifyIterator;
+
+ /**
+ * @brief Assignment operator.
+ */
+ SLList<T,A>& operator=(const SLList<T,A>& other);
+
+
+ /**
+ * @brief Add a new entry to the end of the list.
+ * @param item The item to add.
+ */
+ inline void push_back(const MemberType& item);
+
+ /**
+ * @brief Add a new entry to the beginning of the list.
+ * @param item The item to add.
+ */
+ inline void push_front(const MemberType& item);
+
+ /**
+ * @brief Remove the first item in the list.
+ */
+ inline void pop_front();
+
+ /** @brief Remove all elements from the list. */
+ inline void clear();
+
+ /**
+ * @brief Get an iterator pointing to the first
+ * element in the list.
+ *
+ * @return An iterator pointing to the first
+ * element or the end if the list is empty.
+ */
+ inline iterator begin();
+
+ /**
+ * @brief Get an iterator pointing to the first
+ * element in the list.
+ *
+ * @return An iterator pointing to the first
+ * element or the end if the list is empty.
+ */
+ inline const_iterator begin() const;
+
+ /**
+ * @brief Get an iterator capable of deleting and
+ * inserting elements.
+ *
+ * @return Modifying iterator positioned at the beginning
+ * of the list.
+ */
+ inline ModifyIterator beginModify();
+
+ /**
+ * @brief Get an iterator capable of deleting and
+ * inserting elements.
+ *
+ * @return Modifying iterator positioned after the end
+ * of the list.
+ */
+ inline ModifyIterator endModify();
+
+ /**
+ * @brief Get an iterator pointing to the
+ * end of the list.
+ *
+ * @return An iterator pointing to the end.
+ */
+ inline iterator end();
+
+ /**
+ * @brief Get an iterator pointing to the
+ * end of the list.
+ *
+ * @return An iterator pointing to the end.
+ */
+ inline const_iterator end() const;
+
+ /**
+ * @brief Check whether the list is empty.
+ *
+ * @return True if the list is empty, false otherwise.
+ */
+ inline bool empty() const;
+
+ /**
+ * @brief Get the number of elements the list
+ * contains.
+ */
+ inline int size() const;
+
+ bool operator==(const SLList& sl) const;
+
+
+ bool operator!=(const SLList& sl) const;
+
+ private:
+ /** \todo Please doc me! */
+ struct Element
+ {
+ /**
+ * @brief The next element in the list.
+ */
+ Element* next_;
+ /**
+ * @brief The element we hold.
+ */
+ MemberType item_;
+
+ Element(const MemberType& item, Element* next_=0);
+
+ Element();
+
+ ~Element();
+ };
+
+ /**
+ * @brief Delete the next element in the list.
+ * @param current Element whose next element should be deleted.
+ */
+ void deleteNext(Element* current);
+
+ /**
+ * @brief Copy the elements from another list.
+ * @param other The other list.
+ */
+ void copyElements(const SLList<T,A>& other);
+
+ /**
+ * @brief Delete the next element in the list.
+ *
+ * If the template parameter watchForTail is true, it is checked whether
+ * the deleted element is the tail and therefore the tail must be updated.
+ * @param current Element whose next element should be deleted.
+ */
+ template<bool watchForTail>
+ void deleteNext(Element* current);
+ /**
+ * @brief Insert an element after another one in the list.
+ * @param current The element after which we insert.
+ * @param item The item to insert.
+ */
+ void insertAfter(Element* current, const T& item);
+
+ /** @brief Pseudo element before the first entry. */
+ Element beforeHead_;
+
+ /**
+ * @brief Pointer to the last element in the list.
+ *
+ * If the list is empty, this points to beforeHead_.
+ */
+ Element* tail_;
+
+ /** @brief The allocator we use. */
+ Allocator allocator_;
+
+ /** @brief The number of elements the list holds. */
+ int size_;
+ };
+
+ /**
+ * @brief A mutable iterator for the SLList.
+ */
+ template<typename T, class A>
+ class SLListIterator : public Dune::ForwardIteratorFacade<SLListIterator<T,A>, T, T&, std::size_t>
+ {
+ friend class SLListConstIterator<T,A>;
+ friend class SLListModifyIterator<T,A>;
+ friend class SLList<T,A>;
+
+ public:
+ inline SLListIterator(typename SLList<T,A>::Element* item,
+ SLList<T,A>* sllist)
+ : current_(item), list_(sllist)
+ {}
+
+ inline SLListIterator()
+ : current_(0), list_(0)
+ {}
+
+ inline SLListIterator(const SLListModifyIterator<T,A>& other)
+ : current_(other.iterator_.current_), list_(other.iterator_.list_)
+ {}
+
+ /**
+ * @brief Dereferencing function for the iterator facade.
+ * @return A reference to the element at the current position.
+ */
+ inline T& dereference() const
+ {
+ return current_->item_;
+ }
+
+ /**
+ * @brief Equality test for the iterator facade.
+ * @param other The other iterator to check.
+ * @return true If the other iterator is at the same position.
+ */
+ inline bool equals(const SLListConstIterator<T,A>& other) const
+ {
+ return current_==other.current_;
+ }
+
+ /**
+ * @brief Equality test for the iterator facade.
+ * @param other The other iterator to check.
+ * @return true If the other iterator is at the same position.
+ */
+ inline bool equals(const SLListIterator<T,A>& other) const
+ {
+ return current_==other.current_;
+ }
+
+ /**
+ * @brief Equality test for the iterator facade.
+ * @param other The other iterator to check.
+ * @return true If the other iterator is at the same position.
+ */
+ inline bool equals(const SLListModifyIterator<T,A>& other) const
+ {
+ return current_==other.iterator_.current_;
+ }
+
+ /**
+ * @brief Increment function for the iterator facade.
+ */
+ inline void increment()
+ {
+ current_ = current_->next_;
+ }
+
+ /**
+ * @brief Insert an element in the underlying list after
+ * the current position.
+ * @param v The value to insert.
+ */
+ inline void insertAfter(const T& v) const
+ {
+ assert(list_ );
+ list_->insertAfter(current_, v);
+ }
+
+ /**
+ * @brief Delete the entry after the current position.
+ *
+ * @warning This will invalidate all iterators positioned at the deleted element! Use with care!
+ */
+ inline void deleteNext() const
+ {
+ assert(list_);
+ list_->deleteNext(current_);
+ }
+
+ private:
+ /** @brief The current element. */
+ typename SLList<T,A>::Element* current_;
+ /** @brief The list we iterate over. */
+ SLList<T,A>* list_;
+ };
+
+ /**
+ * @brief A constant iterator for the SLList.
+ */
+ template<class T, class A>
+ class SLListConstIterator : public Dune::ForwardIteratorFacade<SLListConstIterator<T,A>, const T, const T&, std::size_t>
+ {
+ friend class SLListIterator<T,A>;
+ friend class SLList<T,A>;
+
+ public:
+ inline SLListConstIterator()
+ : current_(0)
+ {}
+
+ inline SLListConstIterator(typename SLList<T,A>::Element* item)
+ : current_(item)
+ {}
+
+ inline SLListConstIterator(const SLListIterator<T,A>& other)
+ : current_(other.current_)
+ {}
+
+ inline SLListConstIterator(const SLListConstIterator<T,A>& other)
+ : current_(other.current_)
+ {}
+
+ inline SLListConstIterator(const SLListModifyIterator<T,A>& other)
+ : current_(other.iterator_.current_)
+ {}
+
+ /**
+ * @brief Dereferencing function for the facade.
+ * @return A reference to the element at the current position.
+ */
+ inline const T& dereference() const
+ {
+ return current_->item_;
+ }
+
+ /**
+ * @brief Equality test for the iterator facade.
+ * @param other The other iterator to check.
+ * @return true If the other iterator is at the same position.
+ */
+ inline bool equals(const SLListConstIterator<T,A>& other) const
+ {
+ return current_==other.current_;
+ }
+
+ /**
+ * @brief Increment function for the iterator facade.
+ */
+ inline void increment()
+ {
+ current_ = current_->next_;
+ }
+
+ private:
+ /** @brief The current element. */
+ typename SLList<T,A>::Element* current_;
+ };
+
+ /**
+ * @brief A mutable iterator for the SLList.
+ */
+ template<typename T, class A>
+ class SLListModifyIterator : public Dune::ForwardIteratorFacade<SLListModifyIterator<T,A>, T, T&, std::size_t>
+ {
+ friend class SLListConstIterator<T,A>;
+ friend class SLListIterator<T,A>;
+ public:
+ inline SLListModifyIterator(SLListIterator<T,A> beforeIterator,
+ SLListIterator<T,A> _iterator)
+ : beforeIterator_(beforeIterator), iterator_(_iterator)
+ {}
+
+ inline SLListModifyIterator(const SLListModifyIterator<T,A>& other)
+ : beforeIterator_(other.beforeIterator_), iterator_(other.iterator_)
+ {}
+
+ inline SLListModifyIterator()
+ : beforeIterator_(), iterator_()
+ {}
+
+ /**
+ * @brief Dereferencing function for the iterator facade.
+ * @return A reference to the element at the current position.
+ */
+ inline T& dereference() const
+ {
+ return *iterator_;
+ }
+
+ /**
+ * @brief Test whether another iterator is equal.
+ * @return true if the other iterator is at the same position as
+ * this one.
+ */
+ inline bool equals(const SLListConstIterator<T,A>& other) const
+ {
+ return iterator_== other;
+ }
+
+
+ /**
+ * @brief Test whether another iterator is equal.
+ * @return true if the other iterator is at the same position as
+ * this one.
+ */
+ inline bool equals(const SLListIterator<T,A>& other) const
+ {
+ return iterator_== other;
+ }
+
+
+ /**
+ * @brief Test whether another iterator is equal.
+ * @return true if the other iterator is at the same position as
+ * this one.
+ */
+ inline bool equals(const SLListModifyIterator<T,A>& other) const
+ {
+ return iterator_== other.iterator_;
+ }
+
+ /**
+ * @brief Increment function for the iterator facade.
+ */
+ inline void increment()
+ {
+ ++iterator_;
+ ++beforeIterator_;
+ }
+
+ /**
+ * @brief Insert an element at the current position.
+ *
+ * Starting from the element at the current position all
+ * elements will be shifted by one position to the back.
+ * The iterator will point to the same element as before
+ * after the insertion, i.e. the number of increments to
+ * reach the same position from a begin iterator increases
+ * by one.
+ * This means the inserted element is the one before the one
+ * the iterator points to.
+ * @param v The value to insert.
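+ *
+ * A minimal sketch of the resulting semantics (the values are only illustrative):
+ * \code
+ * Dune::SLList<int> list;
+ * list.push_back(1);
+ * list.push_back(3);
+ * auto it = list.beginModify(); // points to 1
+ * ++it;                         // points to 3
+ * it.insert(2);                 // list is now 1 2 3, it still points to 3
+ * \endcode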
+ */
+ inline void insert(const T& v)
+ {
+ beforeIterator_.insertAfter(v);
+ ++beforeIterator_;
+ }
+
+ /**
+ * @brief Delete the entry at the current position.
+ *
+ * The iterator will be positioned at the next position after the
+ * deletion.
+ * @warning This will invalidate all iterators positioned at the deleted element! Use with care!
+ */
+ inline void remove()
+ {
+ ++iterator_;
+ beforeIterator_.deleteNext();
+ }
+
+ private:
+ /** @brief Iterator positioned at the position before the current. */
+ SLListIterator<T,A> beforeIterator_;
+ /** @brief Iterator positioned at the current position. */
+ SLListIterator<T,A> iterator_;
+ };
+
+ template<typename T, typename A>
+ std::ostream& operator<<(std::ostream& os, const SLList<T,A>& sllist)
+ {
+ typedef typename SLList<T,A>::const_iterator Iterator;
+ Iterator end = sllist.end();
+ Iterator current= sllist.begin();
+
+ os << "{ ";
+
+ if(current!=end) {
+ os<<*current<<" ("<<static_cast<const void*>(&(*current))<<")";
+ ++current;
+
+ for(; current != end; ++current)
+ os<<", "<<*current<<" ("<<static_cast<const void*>(&(*current))<<")";
+ }
+ os<<"} ";
+ return os;
+ }
+
+ template<typename T, class A>
+ SLList<T,A>::Element::Element(const MemberType& item, Element* next)
+ : next_(next), item_(item)
+ {}
+
+ template<typename T, class A>
+ SLList<T,A>::Element::Element()
+ : next_(0), item_()
+ {}
+
+ template<typename T, class A>
+ SLList<T,A>::Element::~Element()
+ {
+ next_=0;
+ }
+
+ template<typename T, class A>
+ SLList<T,A>::SLList()
+ : beforeHead_(), tail_(&beforeHead_), allocator_(), size_(0)
+ {
+ beforeHead_.next_=0;
+ assert(&beforeHead_==tail_);
+ assert(tail_->next_==0);
+ }
+
+ template<typename T, class A>
+ SLList<T,A>::SLList(const SLList<T,A>& other)
+ : beforeHead_(), tail_(&beforeHead_), allocator_(), size_(0)
+ {
+ copyElements(other);
+ }
+
+ template<typename T, class A>
+ template<typename T1, class A1>
+ SLList<T,A>::SLList(const SLList<T1,A1>& other)
+ : beforeHead_(), tail_(&beforeHead_), allocator_(), size_(0)
+ {
+ copyElements(other);
+ }
+
+ template<typename T, typename A>
+ void SLList<T,A>::copyElements(const SLList<T,A>& other)
+ {
+ assert(tail_==&beforeHead_);
+ assert(size_==0);
+ typedef typename SLList<T,A>::const_iterator Iterator;
+ Iterator iend = other.end();
+ for(Iterator element=other.begin(); element != iend; ++element)
+ push_back(*element);
+
+ assert(other.size()==size());
+ }
+
+ template<typename T, class A>
+ SLList<T,A>::~SLList()
+ {
+ clear();
+ }
+
+ template<typename T, class A>
+ bool SLList<T,A>::operator==(const SLList& other) const
+ {
+ if(size()!=other.size())
+ return false;
+ for(const_iterator iter=begin(), oiter=other.begin();
+ iter != end(); ++iter, ++oiter)
+ if(*iter!=*oiter)
+ return false;
+ return true;
+ }
+
+ template<typename T, class A>
+ bool SLList<T,A>::operator!=(const SLList& other) const
+ {
+ if(size()==other.size()) {
+ for(const_iterator iter=begin(), oiter=other.begin();
+ iter != end(); ++iter, ++oiter)
+ if(*iter!=*oiter)
+ return true;
+ return false;
+ }else
+ return true;
+ }
+ template<typename T, class A>
+ SLList<T,A>& SLList<T,A>::operator=(const SLList<T,A>& other)
+ {
+ clear();
+ copyElements(other);
+ return *this;
+ }
+
+ template<typename T, class A>
+ inline void SLList<T,A>::push_back(const MemberType& item)
+ {
+ assert(size_>0 || tail_==&beforeHead_);
+ tail_->next_ = allocator_.allocate(1);
+ assert(size_>0 || tail_==&beforeHead_);
+ tail_ = tail_->next_;
+ ::new (static_cast<void*>(&(tail_->item_)))T(item);
+ tail_->next_=0;
+ assert(tail_->next_==0);
+ ++size_;
+ }
+
+ template<typename T, class A>
+ inline void SLList<T,A>::insertAfter(Element* current, const T& item)
+ {
+ assert(current);
+
+#ifndef NDEBUG
+ bool changeTail = (current == tail_);
+#endif
+
+ // Save old next element
+ Element* tmp = current->next_;
+
+ assert(!changeTail || !tmp);
+
+ // Allocate space
+ current->next_ = allocator_.allocate(1);
+
+ // Use copy constructor to initialize memory
+ std::allocator_traits<Allocator>::construct(allocator_, current->next_, Element(item,tmp));
+
+ //::new(static_cast<void*>(&(current->next_->item_))) T(item);
+
+ if(!current->next_->next_) {
+ // Update tail
+ assert(changeTail);
+ tail_ = current->next_;
+ }
+ ++size_;
+ assert(!tail_->next_);
+ }
+
+ template<typename T, class A>
+ inline void SLList<T,A>::push_front(const MemberType& item)
+ {
+ if(tail_ == &beforeHead_) {
+ // list was empty
+ beforeHead_.next_ = tail_ = allocator_.allocate(1);
+ ::new(static_cast<void*>(&beforeHead_.next_->item_))T(item);
+ beforeHead_.next_->next_=0;
+ }else{
+ Element* added = allocator_.allocate(1);
+ ::new(static_cast<void*>(&added->item_))T(item);
+ added->next_=beforeHead_.next_;
+ beforeHead_.next_=added;
+ }
+ assert(tail_->next_==0);
+ ++size_;
+ }
+
+
+ template<typename T, class A>
+ inline void SLList<T,A>::deleteNext(Element* current)
+ {
+ this->template deleteNext<true>(current);
+ }
+
+ template<typename T, class A>
+ template<bool watchForTail>
+ inline void SLList<T,A>::deleteNext(Element* current)
+ {
+ assert(current->next_);
+ Element* next = current->next_;
+
+ if(watchForTail)
+ if(next == tail_) {
+ // deleting last element changes tail!
+ tail_ = current;
+ }
+
+ current->next_ = next->next_;
+ std::allocator_traits<Allocator>::destroy(allocator_, next);
+ allocator_.deallocate(next, 1);
+ --size_;
+ assert(!watchForTail || &beforeHead_ != tail_ || size_==0);
+ }
+
+ template<typename T, class A>
+ inline void SLList<T,A>::pop_front()
+ {
+ deleteNext(&beforeHead_);
+ }
+
+ template<typename T, class A>
+ inline void SLList<T,A>::clear()
+ {
+ while(beforeHead_.next_ ) {
+ this->template deleteNext<false>(&beforeHead_);
+ }
+
+ assert(size_==0);
+ // update the tail!
+ tail_ = &beforeHead_;
+ }
+
+ template<typename T, class A>
+ inline bool SLList<T,A>::empty() const
+ {
+ return (&beforeHead_ == tail_);
+ }
+
+ template<typename T, class A>
+ inline int SLList<T,A>::size() const
+ {
+ return size_;
+ }
+
+ template<typename T, class A>
+ inline SLListIterator<T,A> SLList<T,A>::begin()
+ {
+ return iterator(beforeHead_.next_, this);
+ }
+
+ template<typename T, class A>
+ inline SLListConstIterator<T,A> SLList<T,A>::begin() const
+ {
+ return const_iterator(beforeHead_.next_);
+ }
+
+ template<typename T, class A>
+ inline SLListIterator<T,A> SLList<T,A>::end()
+ {
+ return iterator();
+ }
+
+ template<typename T, class A>
+ inline SLListModifyIterator<T,A> SLList<T,A>::endModify()
+ {
+ return SLListModifyIterator<T,A>(iterator(tail_, this),iterator());
+ }
+
+
+ template<typename T, class A>
+ inline SLListModifyIterator<T,A> SLList<T,A>::beginModify()
+ {
+ return SLListModifyIterator<T,A>(iterator(&beforeHead_, this),
+ iterator(beforeHead_.next_, this));
+ }
+
+ template<typename T, class A>
+ inline SLListConstIterator<T,A> SLList<T,A>::end() const
+ {
+ return const_iterator();
+ }
+
+ /** @} */
+}
+#endif
--- /dev/null
+install(
+ FILES
+ apply.hh
+ functional.hh
+ make_array.hh
+ optional.hh
+ type_traits.hh
+ utility.hh
+ variant.hh
+ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dune/common/std)
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_STD_APPLY_HH
+#define DUNE_COMMON_STD_APPLY_HH
+
+#include <tuple>
+
+namespace Dune
+{
+ namespace Std
+ {
+
+ /// Invoke the Callable object f with a tuple of arguments.
+ /// \deprecated Use `std::apply` directly.
+ using std::apply;
+
+ } // namespace Std
+} // namespace Dune
+
+#endif // #ifndef DUNE_COMMON_STD_APPLY_HH
--- /dev/null
+// -*- tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set ts=8 sw=2 et sts=2:
+#ifndef DUNE_COMMON_STD_FUNCTIONAL_HH
+#define DUNE_COMMON_STD_FUNCTIONAL_HH
+
+#include <functional>
+
+namespace Dune
+{
+
+ namespace Std
+ {
+
+ /**
+ * @brief A function object type whose operator() returns its argument unchanged
+ * @note Equivalent to `return std::forward<T>(t);`
+ * @warning When passing r-values, the result must only be used for direct
+ * consumption in an outer function call
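+ *
+ * A minimal usage sketch (variable names are only illustrative):
+ * \code
+ * int i = 7;
+ * int& r = Dune::Std::identity{}(i); // r refers to i, the argument is returned unchanged
+ * \endcode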
+ */
+#if DUNE_HAVE_CXX_STD_IDENTITY
+ using std::identity;
+#else //DUNE_HAVE_CXX_STD_IDENTITY
+ struct identity {
+ template<class T>
+ constexpr T&& operator()(T&& t ) const noexcept {return std::forward<T>(t);}
+ };
+#endif
+ } // namespace Std
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_COMMON_STD_FUNCTIONAL_HH
--- /dev/null
+#ifndef DUNE_COMMON_STD_MAKE_ARRAY_HH
+#define DUNE_COMMON_STD_MAKE_ARRAY_HH
+
+#include <array>
+#include <type_traits>
+
+#if DUNE_HAVE_CXX_EXPERIMENTAL_MAKE_ARRAY
+#include <experimental/array>
+#endif
+
+namespace Dune {
+namespace Std {
+
+#if DUNE_HAVE_CXX_EXPERIMENTAL_MAKE_ARRAY
+
+ /// \deprecated Use deduction guide of `std::array` or `std::to_array`.
+ using std::experimental::make_array;
+
+#else // DUNE_HAVE_CXX_EXPERIMENTAL_MAKE_ARRAY
+
+ //! Create and initialize an array
+ /**
+ * \note This method is a somewhat limited dune-specific version of
+ * make_array() as proposed for C++17 (see <a
+ * href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4391.html">N4391</a>,
+ * accepted <a
+ * href="https://botondballo.wordpress.com/2015/06/05/trip-report-c-standards-meeting-in-lenexa-may-2015/">May
+ * 2015</a>). The differences are that this version should
+ * never be used with explicitly given template arguments, or
+ * with std::reference_wrapper<...> arguments, and we do not
+ * give a diagnostic when anyone happens to do that.
+ *
+ * \ingroup CxxUtilities
+ * \deprecated Use deduction guide of `std::array` or `std::to_array`.
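+ *
+ * A minimal usage sketch (the values are only illustrative):
+ * \code
+ * auto a = Dune::Std::make_array(1, 2, 3); // a is a std::array<int, 3> holding {1, 2, 3}
+ * \endcode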
+ */
+ template <typename... Args>
+ std::array<typename std::common_type<Args...>::type, sizeof...(Args)>
+ make_array(const Args&... args) {
+ std::array<typename std::common_type<Args...>::type, sizeof...(Args)>
+ result = {{args...}};
+ return result;
+ }
+
+#endif // DUNE_HAVE_CXX_EXPERIMENTAL_MAKE_ARRAY
+
+}
+}
+
+#endif
--- /dev/null
+#ifndef DUNE_COMMON_STD_OPTIONAL_HH
+#define DUNE_COMMON_STD_OPTIONAL_HH
+
+#include <cassert>
+#include <functional>
+#include <stdexcept>
+#include <type_traits>
+#include <utility>
+
+#include <optional>
+
+#warning dune/common/std/optional.hh is deprecated and will be removed after Dune 2.8.\
+ Include <optional> instead
+
+namespace Dune
+{
+
+ namespace Std
+ {
+ // std::optional is part of C++17, so we simply forward it into our namespace
+ template< class T >
+ using optional = std::optional< T >;
+
+ using nullopt_t = std::nullopt_t;
+ using in_place_t = std::in_place_t;
+
+ namespace
+ {
+ const std::nullopt_t nullopt = std::nullopt;
+ const std::in_place_t in_place = std::in_place;
+ } // anonymous namespace
+
+ using bad_optional_access = std::bad_optional_access;
+
+ } // namespace Std
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_COMMON_STD_OPTIONAL_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_STD_TYPE_TRAITS_HH
+#define DUNE_COMMON_STD_TYPE_TRAITS_HH
+
+#include <type_traits>
+#include <dune/common/typetraits.hh>
+#include <dune/common/typeutilities.hh>
+
+#if __has_include(<experimental/type_traits>)
+#include <experimental/type_traits>
+#endif
+
+namespace Dune
+{
+
+//! Namespace for features backported from new C++ standards
+/**
+ * The namespace Dune::Std contains library features of new C++ standards and
+ * technical specifications backported to older compilers. Most features are
+ * detected and pulled into this namespace from the standard library if your
+ * compiler has native support. If it doesn't, we provide a fallback implementation
+ * on a best-effort basis.
+ *
+ * \ingroup CxxUtilities
+ */
+namespace Std
+{
+
+ // to_false_type
+ // -------------
+
+ /** \class to_false_type
+ *
+ * \brief template mapping a type to <tt>std::false_type</tt>
+ * \deprecated Use Dune::AlwaysFalse (from dune/common/typetraits.hh) instead
+ * \tparam T Some type
+ *
+ * Suppose you have a template class. You want to document the required
+ * members of this class in the non-specialized template, but you know that
+ * actually instantiating the non-specialized template is an error. You can
+ * try something like this:
+ * \code
+ * template<typename T>
+ * struct Traits
+ * {
+ * static_assert(false,
+ * "Instantiating this non-specialized template is an "
+ * "error. You should use one of the specializations "
+ * "instead.");
+ * //! The type used to frobnicate T
+ * typedef void FrobnicateType;
+ * };
+ * \endcode
+ * This will trigger static_assert() as soon as the compiler reads the
+ * definition for the Traits template, since it knows that "false" can never
+ * become true, no matter what the template parameters of Traits are. As a
+ * workaround you can use to_false_type: replace <tt>false</tt> by
+ * <tt>to_false_type<T>::value</tt>, like this:
+ * \code
+ * template<typename T>
+ * struct Traits
+ * {
+ * static_assert(Std::to_false_type<T>::value,
+ * "Instantiating this non-specialized template is an "
+ * "error. You should use one of the specializations "
+ * "instead.");
+ * //! The type used to frobnicate T
+ * typedef void FrobnicateType;
+ * };
+ * \endcode
+ * Since there might be a specialization of to_false_type for template
+ * parameter T, the compiler cannot trigger static_assert() until the type
+ * of T is known, that is, until Traits<T> is instantiated.
+ *
+ * \ingroup CxxUtilities
+ */
+ template< typename T >
+ struct [[deprecated("Will be removed after release 2.8. Use Dune::AlwaysFalse (from dune/common/typetraits.hh)")]] to_false_type : public std::false_type {};
+
+
+
+ // to_true_type
+ // ------------
+
+ /** \class to_true_type
+ *
+ * \brief template mapping a type to <tt>std::true_type</tt>
+ * \deprecated Use Dune::AlwaysTrue (from dune/common/typetraits.hh) instead
+ * \tparam T Some type
+ *
+ * \note This class exists mostly for consistency with to_false_type.
+ *
+ * \ingroup CxxUtilities
+ */
+ template< typename T >
+ struct [[deprecated("Will be removed after release 2.8. Use Dune::AlwaysTrue (from dune/common/typetraits.hh)")]] to_true_type : public std::true_type {};
+
+
+ /// A helper alias template std::bool_constant imported into the namespace Dune::Std
+ /// \deprecated Use the `std::bool_constant` directly.
+ using std::bool_constant;
+
+
+ namespace Impl {
+
+ // If R is void we only need to check if F can be called
+ // with given Args... list. If this is not possible
+ // result_of_t is not defined and this overload is disabled.
+ template<class R, class F, class... Args,
+ std::enable_if_t<
+ std::is_same<std::void_t<std::result_of_t<F(Args...)>>, R>::value
+ , int> = 0>
+ std::true_type is_callable_helper(PriorityTag<2>)
+ { return {}; }
+
+ // Check if result of F(Args...) can be converted to R.
+ // If F cannot even be called with given Args... then
+ // result_of_t is not defined and this overload is disabled.
+ template<class R, class F, class... Args,
+ std::enable_if_t<
+ std::is_convertible<std::result_of_t<F(Args...)>, R>::value
+ , int> = 0>
+ std::true_type is_callable_helper(PriorityTag<1>)
+ { return {}; }
+
+ // If none of the above matches, F can either not be called
+ // with given Args..., or the result cannot be converted to
+ // void, or R is not void.
+ template<class R, class F, class... Args>
+ std::false_type is_callable_helper(PriorityTag<0>)
+ { return {}; }
+ }
+
+ /**
+ * \brief Traits class to check if function is callable
+ * \deprecated Use std::is_invocable from <type_traits>
+ *
+ * \tparam D Function descriptor
+ * \tparam R Return value
+ *
+ * If D = F(Args...) this checks if F can be called with an
+ * argument list of type Args..., and if the return value can
+ * be converted to R. If R is void, any return type is accepted.
+ * The result is encoded by deriving from std::integral_constant<bool, result>.
+ *
+ * If D is not of the form D = F(Args...) this class is not defined.
+ *
+ * This implements std::is_callable as proposed in N4446 for C++17.
+ *
+ * \ingroup CxxUtilities
+ */
+ template <class D, class R= void>
+ struct is_callable;
+
+ /**
+ * \brief Traits class to check if function is callable
+ * \deprecated Use std::is_invocable from <type_traits>
+ *
+ * \tparam D Function descriptor
+ * \tparam R Return value
+ *
+ * If D = F(Args...) this checks if F can be called with an
+ * argument list of type Args..., and if the return value can
+ * be converted to R. If R is void, any return type is accepted.
+ * The result is encoded by deriving from std::integral_constant<bool, result>.
+ *
+ * If D is not of the form D = F(Args...) this class is not defined.
+ *
+ * This implements std::is_callable as proposed in N4446 for C++17.
+ *
+ * \ingroup CxxUtilities
+ */
+ template <class F, class... Args, class R>
+ struct [[deprecated("Use std::is_invocable from <type_traits>. Will be removed after release 2.8")]] is_callable< F(Args...), R> :
+ decltype(Impl::is_callable_helper<R, F, Args...>(PriorityTag<42>()))
+ {};
+
+
+ /**
+ * \brief Traits class to check if function is invocable
+ * \deprecated Use std::is_invocable from <type_traits>
+ *
+ * \tparam F Function to check
+ * \tparam Args Function arguments to check
+ *
+ * This checks if F can be called with an arguments list of type Args....
+ * The result is encoded by deriving from std::integral_constant<bool, result>.
+ *
+ * This implements std::is_invocable from C++17.
+ *
+ * \ingroup CxxUtilities
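+ *
+ * A minimal usage sketch (the lambda is only illustrative):
+ * \code
+ * auto f = [](int i) { return i + 1; };
+ * static_assert(Dune::Std::is_invocable<decltype(f), int>::value, "f(int) is valid");
+ * static_assert(!Dune::Std::is_invocable<decltype(f), int*>::value, "f(int*) is not");
+ * \endcode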
+ */
+ template <class F, class... Args>
+ struct [[deprecated("Use std::is_invocable from <type_traits>. Will be removed after release 2.8")]] is_invocable :
+ decltype(Impl::is_callable_helper<void, F, Args...>(PriorityTag<42>()))
+ {};
+
+ /**
+ * \brief Traits class to check if function is invocable and the return type is compatible
+ * \deprecated Use std::is_invocable_r from <type_traits>
+ *
+ * \tparam R Desired result type
+ * \tparam F Function to check
+ * \tparam Args Function arguments to check
+ *
+ * This checks if F can be called with an arguments list of type Args..., and
+ * if the return value can be converted to R.
+ * The result is encoded by deriving from std::integral_constant<bool, result>.
+ *
+ * This implements std::is_invocable_r from C++17.
+ *
+ * \ingroup CxxUtilities
+ */
+ template <class R, class F, class... Args>
+ struct [[deprecated("Use std::is_invocable_r from <type_traits>. Will be removed after release 2.8")]] is_invocable_r :
+ decltype(Impl::is_callable_helper<R, F, Args...>(PriorityTag<42>()))
+ {};
+
+
+#if DUNE_HAVE_CXX_EXPERIMENTAL_IS_DETECTED
+
+ using std::experimental::nonesuch;
+ using std::experimental::detected_or;
+ using std::experimental::is_detected;
+ using std::experimental::detected_t;
+ using std::experimental::is_detected_v;
+ using std::experimental::detected_or_t;
+ using std::experimental::is_detected_exact;
+ using std::experimental::is_detected_exact_v;
+ using std::experimental::is_detected_convertible;
+ using std::experimental::is_detected_convertible_v;
+
+#else // DUNE_HAVE_CXX_EXPERIMENTAL_IS_DETECTED
+
+ // fallback version of std::experimental::is_detected et al., heavily scribbled
+ // from cppreference.com (but there is actually not much implementation to the thing)
+
+#ifndef DOXYGEN
+
+ namespace Impl {
+
+ // default version of detector, this gets matched on failure
+ template<typename Default, typename Void, template<typename...> class Op, typename... Args>
+ struct detector
+ {
+ using value_t = std::false_type;
+ using type = Default;
+ };
+
+ // specialization of detector that matches if Op<Args...> can be instantiated
+ template<typename Default, template<typename...> class Op, typename... Args>
+ struct detector<Default, std::void_t<Op<Args...>>, Op, Args...>
+ {
+ using value_t = std::true_type;
+ using type = Op<Args...>;
+ };
+
+ }
+
+#endif // DOXYGEN
+
+ //! Type representing a lookup failure by std::detected_or and friends.
+ /**
+ * This type cannot be constructed, destroyed or copied.
+ *
+ * \note This functionality is part of the C++ library fundamentals TS v2 and might
+ * or might not become part of C++2a.
+ *
+ * \ingroup CxxUtilities
+ */
+ struct nonesuch
+ {
+ nonesuch() = delete;
+ ~nonesuch() = delete;
+ nonesuch(const nonesuch&) = delete;
+ void operator=(const nonesuch&) = delete;
+ };
+
+ //! Detects whether `Op<Args...>` is valid and makes the result available.
+ /**
+ * This alias template is an alias for an unspecified class type with two
+ * nested `typedefs` `value_t` and `type`. It can be used to detect whether
+ * the meta function call `Op<Args...>` is valid and access the result of
+ * the call by inspecting the returned type, which is defined as follows:
+ *
+ * * If `Op<Args...>` can be instantiated, `value_t` is an alias for `std::true_type`
+ * and `type` is an alias for `Op<Args...>`.
+ * * If `Op<Args...>` is invalid, `value_t` is an alias for `std::false_type`
+ * and `type` is an alias for `Default`.
+ *
+ * This can be used to safely extract a nested `typedef` from a type `T` that
+ * might not define the `typedef`:
+ \code
+ struct A { using size_type = int ; };
+ struct B;
+
+ template<typename T>
+ using SizeType = typename T::size_type;
+
+ // this extracts the nested typedef for int
+ using st_a = typename detected_or<std::size_t,SizeType,A>::type;
+ // as there is no nested typedef in B, this yields std::size_t
+ using st_b = typename detected_or<std::size_t,SizeType,B>::type;
+ \endcode
+ *
+ * \note This functionality is part of the C++ library fundamentals TS v2 and might
+ * or might not become part of C++2a.
+ *
+ * \ingroup CxxUtilities
+ */
+ template<typename Default, template<typename...> class Op, typename... Args>
+ using detected_or = Impl::detector<Default,void,Op,Args...>;
+
+ //! Detects whether `Op<Args...>` is valid.
+ /**
+ * This alias template checks whether `Op<Args...>` can be instantiated. It is
+ * equivalent to `typename detected_or<nonesuch,Op,Args...>::value_t`.
+ *
+ * \note This functionality is part of the C++ library fundamentals TS v2 and might
+ * or might not become part of C++2a.
+ *
+ * \ingroup CxxUtilities
+ */
+ template<template<typename...> class Op, typename... Args>
+ using is_detected = typename detected_or<nonesuch,Op,Args...>::value_t;
+
+#ifdef __cpp_variable_templates
+ //! Detects whether `Op<Args...>` is valid and makes the result available as a value.
+ /**
+ * This constexpr variable checks whether `Op<Args...>` can be instantiated. It is
+ * equivalent to `is_detected<Op,Args...>::value`.
+ *
+ * \note This functionality is part of the C++ library fundamentals TS v2 and might
+ * or might not become part of C++2a.
+ *
+ * \ingroup CxxUtilities
+ */
+ template<template<typename...> class Op, typename... Args>
+ constexpr bool is_detected_v = is_detected<Op,Args...>::value;
+#endif // __cpp_variable_templates
+
+ //! Returns `Op<Args...>` if that is valid; otherwise returns nonesuch.
+ /**
+ * This alias template can be used to instantiate `Op<Args...>` in a context that is
+ * not SFINAE-safe by appropriately wrapping the instantiation. If instantiation fails,
+ * the marker type nonesuch is returned instead.
+ *
+ * \note This functionality is part of the C++ library fundamentals TS v2 and might
+ * or might not become part of C++2a.
+ *
+ * \ingroup CxxUtilities
+ */
+ template<template<typename...> class Op, typename... Args>
+ using detected_t = typename detected_or<nonesuch,Op,Args...>::type;
+
+
+ //! Returns `Op<Args...>` if that is valid; otherwise returns the fallback type `Default`.
+ /**
+ * This alias template can be used to instantiate `Op<Args...>` in a context that is
+ * not SFINAE-safe by appropriately wrapping the instantiation and automatically falling back
+ * to `Default` if instantiation fails.
+ *
+ * \note This functionality is part of the C++ library fundamentals TS v2 and might
+ * or might not become part of C++2a.
+ *
+ * \ingroup CxxUtilities
+ */
+ template<typename Default, template<typename...> class Op, typename... Args>
+ using detected_or_t = typename detected_or<Default,Op,Args...>::type;
+
+ //! Checks whether `Op<Args...>` is `Expected` without causing an error if `Op<Args...>` is invalid.
+ /**
+ * \note This functionality is part of the C++ library fundamentals TS v2 and might
+ * or might not become part of C++2a.
+ *
+ * \ingroup CxxUtilities
+ */
+ template<typename Expected, template<typename...> class Op, typename... Args>
+ using is_detected_exact = std::is_same<Expected,detected_t<Op,Args...>>;
+
+#ifdef __cpp_variable_templates
+ //! Convenient access to the result value of is_detected_exact.
+ /**
+ * \note This functionality is part of the C++ library fundamentals TS v2 and might
+ * or might not become part of C++2a.
+ *
+ * \ingroup CxxUtilities
+ */
+ template<typename Expected, template<typename...> class Op, typename... Args>
+ constexpr bool is_detected_exact_v = is_detected_exact<Expected,Op,Args...>::value;
+#endif // __cpp_variable_templates
+
+ //! Checks whether `Op<Args...>` is convertible to `Target` without causing an error if `Op<Args...>` is invalid.
+ /**
+ * \note This functionality is part of the C++ library fundamentals TS v2 and might
+ * or might not become part of C++2a.
+ *
+ * \ingroup CxxUtilities
+ */
+ template<typename Target, template<typename...> class Op, typename... Args>
+ using is_detected_convertible = std::is_convertible<Target,detected_t<Op,Args...>>;
+
+#ifdef __cpp_variable_templates
+ //! Convenient access to the result value of is_detected_convertible.
+ /**
+ * \note This functionality is part of the C++ library fundamentals TS v2 and might
+ * or might not become part of C++2a.
+ *
+ * \ingroup CxxUtilities
+ */
+ template<typename Target, template<typename...> class Op, typename... Args>
+ constexpr bool is_detected_convertible_v = is_detected_convertible<Target,Op,Args...>::value;
+#endif // __cpp_variable_templates
+
+#endif // DUNE_HAVE_CXX_EXPERIMENTAL_IS_DETECTED
+
+
+
+ // conjunction
+ // -----------
+
+ /**
+ * \brief forms the logical conjunction of the type traits B...
+ *
+ * \note This functionality is part of the C++17 standard.
+ *
+ * \ingroup CxxUtilities
+ **/
+ template< class... B >
+ struct [[deprecated("Will be removed after release 2.8. Use std::conjunction instead.")]] conjunction
+ : std::conjunction<B...>
+ {};
+
+
+ // disjunction
+ // -----------
+
+ /**
+ * \brief forms the logical disjunction of the type traits B...
+ *
+ * \note This functionality is part of the C++17 standard.
+ *
+ * \ingroup CxxUtilities
+ **/
+ template< class... B >
+ struct [[deprecated("Will be removed after release 2.8. Use std::disjunction instead.")]] disjunction
+ : std::disjunction<B...>
+ {};
+
+
+ // negation
+ // --------
+
+ /**
+ * \brief forms the logical negation of the type trait B
+ *
+ * \note This functionality is part of the C++17 standard.
+ *
+ * \ingroup CxxUtilities
+ **/
+ template<class B>
+ struct [[deprecated("Will be removed after release 2.8. Use std::negation instead.")]] negation
+ : std::negation<B>
+ {};
+
+} // namespace Std
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_COMMON_STD_TYPE_TRAITS_HH
--- /dev/null
+#ifndef DUNE_COMMON_STD_UTILITY_HH
+#define DUNE_COMMON_STD_UTILITY_HH
+
+#include <utility>
+
+#warning dune/common/std/utility.hh is deprecated and will be removed after Dune 2.8.\
+ Include <utility> instead
+
+namespace Dune
+{
+
+ namespace Std
+ {
+
+ using std::integer_sequence;
+ using std::index_sequence;
+ using std::make_integer_sequence;
+ using std::make_index_sequence;
+ using std::index_sequence_for;
+
+ } // namespace Std
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_COMMON_STD_UTILITY_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_STD_VARIANT_HH
+#define DUNE_COMMON_STD_VARIANT_HH
+
+#warning dune/common/std/variant.hh is deprecated and will be removed after Dune 2.8.\
+ Include <variant> instead
+
+#include <variant>
+
+namespace Dune {
+namespace Std {
+ using std::variant;
+ using std::visit;
+ using std::variant_size;
+ using std::variant_size_v;
+ using std::get;
+ using std::get_if;
+ using std::holds_alternative;
+ using std::monostate;
+}
+}
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "stdstreams.hh"
+
+namespace Dune {
+
+ /*
+
+ The standard debug streams declared in stdstreams.hh exist in this
+ file so that they can be compiled into libdune
+
+ */
+
+ /* stream for very verbose output: information on the lowest
+ level. This is expected to report insane amounts of
+ information. Use of the activation-flag to only generate output
+ near the problem is recommended */
+ DVVerbType dvverb(std::cout);
+
+ /* stream for verbose output: information that helps to trace in
+ more detail what the modules do */
+ DVerbType dverb(std::cout);
+
+ /* stream for informative output: summary infos on what a module
+ does, runtimes, etc. */
+ DInfoType dinfo(std::cout);
+
+ /* stream for warnings: messages which may indicate problems */
+ DWarnType dwarn(std::cerr);
+
+ /* stream for strong warnings: messages indicating fatal errors */
+ DGraveType dgrave(std::cerr);
+
+ /* stream for error messages: only packages integrating Dune
+ completely will redirect it. The output of derr is independent of
+ the debug-level, only the activation-flag is checked */
+ DErrType derr(std::cerr);
+
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+/**
+ \file
+ \brief Standard Dune debug streams
+
+ The standard debug streams are compiled into libdune to exist
+ globally. This file declares the stream types and the global debug
+ level.
+ */
+
+#ifndef DUNE_COMMON_STDSTREAMS_HH
+#define DUNE_COMMON_STDSTREAMS_HH
+
+#include "debugstream.hh"
+
+namespace Dune {
+
+ /**
+ \addtogroup DebugOut
+ @{
+
+ standard debug streams with level below MINIMAL_DEBUG_LEVEL will
+ collapse to doing nothing if output is requested.
+
+ MINIMAL_DEBUG_LEVEL is set to DUNE_MINIMAL_DEBUG_LEVEL, which is
+ defined in config.h and can be changed by the configure option
+ @code --with-minimal-debug-level=[grave|warn|info|verb|vverb] @endcode
+
+ For a Dune-Release this should be set to at least 4 so that only
+ important messages are active. Dune-developers may adapt this
+ setting to their debugging needs locally.
+
+ Keep in mind that libdune has to be recompiled if this value is changed!
+
+
+
+ The singleton instances of the available debug streams can be found in
+ the \ref DebugOut "Standard Debug Streams" module
+
+ @}
+ */
+
+ /**
+ \defgroup StdStreams Standard Debug Streams
+ \ingroup DebugOut
+ @{
+
+ Dune defines several standard output streams for the library
+ routines.
+
+ Applications may control the standard streams via the attach/detach,
+ push/pop interface but should define an independent set of streams (see \ref DebugAppl )
+
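+ A minimal usage sketch (the messages are only illustrative):
+ \code
+ #include <dune/common/stdstreams.hh>
+
+ Dune::dinfo << "entering assembly loop" << std::endl;
+ Dune::dwarn << "matrix is close to singular" << std::endl;
+ \endcode
+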
+ */
+
+ /**
+ @brief The default minimum debug level.
+
+ If the level of a stream is at least this value
+ it will be activated.
+ */
+#ifndef DUNE_MINIMAL_DEBUG_LEVEL
+#define DUNE_MINIMAL_DEBUG_LEVEL 4
+#endif
+ static const DebugLevel MINIMAL_DEBUG_LEVEL = DUNE_MINIMAL_DEBUG_LEVEL;
+
+ /**
+ @brief The level of the very verbose debug stream.
+ @see dvverb
+ */
+ static const DebugLevel VERY_VERBOSE_DEBUG_LEVEL = 1;
+
+ /**
+ @brief Type of very verbose debug stream.
+ @see dvverb
+ */
+ typedef DebugStream<VERY_VERBOSE_DEBUG_LEVEL, MINIMAL_DEBUG_LEVEL> DVVerbType;
+
+ /**
+ \brief stream for very verbose output.
+
+ \code
+ #include <dune/common/stdstreams.hh>
+ \endcode
+
+ Information on the lowest
+ level. This is expected to report insane amounts of
+ information. Use of the activation-flag to only generate output
+ near the problem is recommended.
+ */
+ extern DVVerbType dvverb;
+
+ /**
+ @brief The level of the verbose debug stream.
+ @see dvverb
+ */
+ static const DebugLevel VERBOSE_DEBUG_LEVEL = 2;
+
+ /**
+ @brief Type of more verbose debug stream.
+ @see dverb
+ */
+ typedef DebugStream<VERBOSE_DEBUG_LEVEL, MINIMAL_DEBUG_LEVEL> DVerbType;
+
+ /**
+ @brief Singleton of verbose debug stream.
+
+ \code
+ #include <dune/common/stdstreams.hh>
+ \endcode
+ */
+ extern DVerbType dverb;
+
+ /**
+ @brief The level of the informative debug stream.
+ @see dinfo
+ */
+ static const DebugLevel INFO_DEBUG_LEVEL = 3;
+
+ /**
+ @brief Type of debug stream with info level.
+ @see dinfo
+ */
+ typedef DebugStream<INFO_DEBUG_LEVEL, MINIMAL_DEBUG_LEVEL> DInfoType;
+
+ /**
+ @brief Stream for informative output.
+
+ \code
+ #include <dune/common/stdstreams.hh>
+ \endcode
+
+ Summary infos on what a module
+ does, runtimes, etc.
+ */
+ extern DInfoType dinfo;
+
+ /**
+ @brief The level of the debug stream for warnings.
+ @see dwarn
+ */
+ static const DebugLevel WARN_DEBUG_LEVEL = 4;
+
+ /**
+ @brief Type of debug stream with warn level.
+ @see dwarn
+ */
+ typedef DebugStream<WARN_DEBUG_LEVEL, MINIMAL_DEBUG_LEVEL> DWarnType;
+
+ /**
+ @brief Stream for warnings indicating problems.
+
+ \code
+ #include <dune/common/stdstreams.hh>
+ \endcode
+ */
+ extern DWarnType dwarn;
+
+ /**
+ @brief The level of the debug stream for fatal errors.
+ @see dgrave
+ */
+ static const DebugLevel GRAVE_DEBUG_LEVEL = 5;
+
+ /** @brief Type of debug stream for fatal errors.*/
+ typedef DebugStream<GRAVE_DEBUG_LEVEL, MINIMAL_DEBUG_LEVEL> DGraveType;
+
+ /**
+ @brief Stream for warnings indicating fatal errors.
+
+ \code
+ #include <dune/common/stdstreams.hh>
+ \endcode
+ */
+ extern DGraveType dgrave;
+
+ /** @brief The type of the stream used for error messages. */
+ typedef DebugStream<1> DErrType;
+
+ /**
+ @brief Stream for error messages.
+
+ \code
+ #include <dune/common/stdstreams.hh>
+ \endcode
+
+ Only packages integrating Dune
+ completely will redirect it. The output of derr is independent of
+ the debug-level, only the activation-flag is checked.
+ */
+ extern DErrType derr;
+
+ /** @} */
+}
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <cstdlib>
+#include <iostream>
+#include <mutex>
+#include <ostream>
+
+#include <dune/common/stdthread.hh>
+
+namespace Dune
+{
+
+ namespace {
+
+ void printCallOnceError(const char *file, int line, const char *function,
+ const char *msg)
+ {
+ if(file)
+ std::cerr << file << ":" << line << ": ";
+ std::cerr << "error: ";
+ if(function)
+ std::cerr << "(in " << function << "()) ";
+ std::cerr << "std::call_once() is broken.\n"
+ << "\n"
+ << msg << std::endl;
+ }
+
+ void setBool(bool *v)
+ {
+ *v = true;
+ }
+
+ } // anonymous namespace
+
+ void doAssertCallOnce(const char *file, int line, const char *function)
+ {
+ std::once_flag once;
+ bool works = false;
+ try {
+ // pass address to works since call_once passes by value
+ std::call_once(once, setBool, &works);
+ }
+ catch(...) {
+ printCallOnceError(file, line, function,
+"std::call_once() throws an exception. This suggests that the program was\n"
+"linked without a threading library. Common ways to link to a threading\n"
+"library are to specify one of the following during linking: -pthread, \n"
+"-lpthread, or -pthreads. The build system should have tried several of\n"
+"these options, but unfortunately that is only a guess and we cannot verify\n"
+"that we found a working configuration until runtime.\n"
+"\n"
+"Going to rethrow the exception now to give the system library a chance to\n"
+"print more information about it, just in case that helps with debugging.\n"
+ );
+ throw;
+ }
+ if(!works)
+ {
+ printCallOnceError(file, line, function,
+"std::call_once() never calls the function. This suggests that your\n"
+"libstdc++ or your gcc was built without threading support (--disable-threads,\n"
+"see https://gcc.gnu.org/install/configure.html). This is probably a bug in\n"
+"__gthread_once() in /usr/include/c++/4.7/x86_64-linux-gnu/bits/gthr-single.h\n"
+"(which should not silently return success without doing anything, but\n"
+"apparently does so in some versions).\n"
+"\n"
+"To fix the issue, either recompile gcc with a working threading\n"
+"implementation, or file a bug for gthr-single.h, or file a bug at\n"
+"https://dune-project.org/flyspray/ and request a workaround at the dune-side."
+ );
+ std::abort();
+ }
+ }
+
+} // namespace Dune
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_COMMON_STDTHREAD_HH
+#define DUNE_COMMON_STDTHREAD_HH
+
+namespace Dune
+{
+
+ // used internally by assertCallOnce for the actual check
+ void doAssertCallOnce(const char *file, int line, const char *function);
+
+ //! \brief Make sure call_once() works and provide a helpful error message
+ //! otherwise.
+ /**
+ * For call_once() to work, certain versions of libstdc++ need to be
+ * _linked_ with -pthread or similar flags. If that is not the case,
+ * call_once() will throw an exception. This function checks that
+ * call_once() can indeed be used, i.e. that it does not throw an exception
+ * when it should not, and that the code does indeed get executed. If
+ * call_once() cannot be used, assertCallOnce() aborts the program with a
+ * helpful error message.
+ *
+ * The check is only actually executed the first time assertCallOnce() is
+ * called.
+ *
+ * The arguments \c file and \c line specify the filename and line number
+ * that should appear in the error message. They are ignored if \c file is
+ * 0. The argument \c function specifies the name of the function to appear
+ * in the error message. It is ignored if \c function is 0.
+ */
+
+ inline void assertCallOnce(const char *file = nullptr, int line = -1,
+ const char *function = nullptr)
+ {
+ // make sure to call this only the first time this function is invoked
+ [[maybe_unused]] static const bool works
+ = (doAssertCallOnce(file, line, function), true);
+ }
+
+ //! \brief Make sure call_once() works and provide a helpful error message
+ //! otherwise.
+ /**
+ * This calls assertCallOnce() and automatically provides information about
+ * the caller in the error message.
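+ *
+ * A minimal usage sketch (the surrounding function is only illustrative):
+ * \code
+ * void setupThreading()
+ * {
+ *   DUNE_ASSERT_CALL_ONCE();
+ *   // std::call_once() can now be relied upon
+ * }
+ * \endcode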
+ */
+#define DUNE_ASSERT_CALL_ONCE() \
+ ::Dune::assertCallOnce(__FILE__, __LINE__, __func__)
+
+} // namespace Dune
+
+#endif // DUNE_COMMON_STDTHREAD_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_STREAMOPERATORS_HH
+#define DUNE_STREAMOPERATORS_HH
+
+/** \file
+ \brief Implementation of stream operators for std::array and std::tuple
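+
+ A minimal usage sketch (the values are only illustrative; the operators are found
+ via the using-declaration):
+ \code
+ #include <iostream>
+ #include <dune/common/streamoperators.hh>
+
+ std::array<int,3> a{{1,2,3}};
+ std::tuple<int,double> t{4, 2.5};
+ using Dune::operator<<;
+ std::cout << a << " " << t << std::endl; // prints "[1,2,3] [4,2.5]"
+ \endcode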
+ */
+
+#include <array>
+#include <tuple>
+#include <utility>
+
+#include <dune/common/hybridutilities.hh>
+
+namespace Dune
+{
+ /** @addtogroup Common
+
+ @{
+ */
+
+ //! Print a std::tuple
+ template<typename Stream, typename... Ts>
+ inline Stream& operator<<(Stream& stream, const std::tuple<Ts...>& t)
+ {
+ stream<<"[";
+ if(sizeof...(Ts)>0)
+ {
+ Hybrid::forEach(std::make_index_sequence<sizeof...(Ts)-1>{},
+ [&](auto i){stream<<std::get<i>(t)<<",";});
+ stream<<std::get<sizeof...(Ts)-1>(t);
+ }
+ stream<<"]";
+ return stream;
+ }
+
+ //! Read a std::tuple
+ template<typename Stream, typename... Ts>
+ inline Stream& operator>>(Stream& stream, std::tuple<Ts...>& t)
+ {
+ Hybrid::forEach(std::make_index_sequence<sizeof...(Ts)>{},
+ [&](auto i){stream>>std::get<i>(t);});
+ return stream;
+ }
+
+ //! Print a std::array
+ template<typename Stream, typename T, std::size_t N>
+ inline Stream& operator<<(Stream& stream, const std::array<T,N>& a)
+ {
+ stream<<"[";
+ if(N>0)
+ {
+ for(std::size_t i=0; i<N-1; ++i)
+ stream<<a[i]<<",";
+ stream<<a[N-1];
+ }
+ stream<<"]";
+ return stream;
+ }
+
+ /** @} */
+
+} // end namespace Dune
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_STRINGUTILITY_HH
+#define DUNE_COMMON_STRINGUTILITY_HH
+
+/** \file
+    \brief Miscellaneous string utility functions
+ */
+
+#include <cstddef>
+#include <cstring>
+#include <algorithm>
+#include <cassert>
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <new>
+
+#include <dune/common/exceptions.hh>
+
+
+namespace Dune {
+
+ /**
+ * @addtogroup StringUtilities
+ *
+ * @{
+ */
+
+ /** \brief Check whether a character container has a given prefix
+ *
+ * The container must support the begin() and size() methods.
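+   *
+   * A short usage sketch (the values are illustrative):
+   * \code
+   * std::string name = "griddata.vtu";
+   * bool isGrid = Dune::hasPrefix(name, "grid");   // true
+   * \endcode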
+ */
+ template<typename C>
+ bool hasPrefix(const C& c, const char* prefix) {
+ std::size_t len = std::strlen(prefix);
+ return c.size() >= len &&
+ std::equal(prefix, prefix+len, c.begin());
+ }
+
+ /** \brief Check whether a character container has a given suffix
+ *
+ * The container must support the begin() and size() methods and the
+ * const_iterator member type.
+ *
+ * \note This is slow for containers which don't have random access iterators.
+ * In the case of containers with bidirectional iterators, this
+ * slowness is unnecessary.
+ */
+ template<typename C>
+ bool hasSuffix(const C& c, const char* suffix) {
+ std::size_t len = std::strlen(suffix);
+ if(c.size() < len) return false;
+ typename C::const_iterator it = c.begin();
+ std::advance(it, c.size() - len);
+ return std::equal(suffix, suffix+len, it);
+ }
+
+ /**
+ * \brief Format values according to printf format string
+ *
+ * \param s The format string to be used
+   * \param args The values to be formatted
+   *
+   * This is a wrapper around std::snprintf that provides overflow-safe
+   * printf functionality. A static buffer of 1000 characters is tried
+   * first; if it is not sufficient, a dynamically allocated buffer of the
+   * required size is used instead.
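+   *
+   * A usage sketch (the format string and value are illustrative):
+   * \code
+   * std::string s = Dune::formatString("rank-%03d", 7);   // s == "rank-007"
+   * \endcode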
+ */
+ template<class... T>
+ static std::string formatString(const std::string& s, const T&... args)
+ {
+ static const int bufferSize=1000;
+ char buffer[bufferSize];
+
+ // try to format with static buffer
+ int r = std::snprintf(buffer, bufferSize, s.c_str(), args...);
+
+ // negative return values correspond to errors
+ if (r<0)
+ DUNE_THROW(Dune::Exception,"Could not convert format string using given arguments.");
+
+ // if buffer was large enough return result as string
+ if (r<bufferSize)
+ return std::string(buffer);
+
+    // if the buffer was too small, allocate a larger buffer using
+ // the predicted size hint (+1 for the terminating 0-byte).
+ int dynamicBufferSize = r+1;
+
+ std::unique_ptr<char[]> dynamicBuffer;
+ try {
+ dynamicBuffer = std::make_unique<char[]>(dynamicBufferSize);
+ }
+ catch (const std::bad_alloc&) {
+      DUNE_THROW(Dune::Exception,"Could not allocate a large enough dynamic buffer in formatString.");
+ }
+
+ // convert and check for errors again
+ r = std::snprintf(dynamicBuffer.get(), dynamicBufferSize, s.c_str(), args...);
+ if (r<0)
+ DUNE_THROW(Dune::Exception,"Could not convert format string using given arguments.");
+
+ // the new buffer should always be large enough
+ assert(r<dynamicBufferSize);
+
+ return std::string(dynamicBuffer.get());
+ }
+ /** @} */
+
+} // namespace Dune
+
+#endif // DUNE_COMMON_STRINGUTILITY_HH
--- /dev/null
+include(DuneCMakeCompat)
+include(DuneInstance)
+
+dune_add_test(SOURCES arithmetictestsuitetest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES arraylisttest.cc
+ LABELS quick)
+
+dune_add_test(SOURCES arraytest.cc
+ LABELS quick)
+
+dune_add_test(SOURCES assertandreturntest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+dune_add_test(NAME assertandreturntest_compiletime_fail
+ SOURCES assertandreturntest.cc
+ LINK_LIBRARIES dunecommon
+ COMPILE_DEFINITIONS "TEST_COMPILETIME_FAIL"
+ EXPECT_COMPILE_FAIL
+ LABELS quick)
+dune_add_test(NAME assertandreturntest_ndebug
+ SOURCES assertandreturntest.cc
+ LINK_LIBRARIES dunecommon
+ COMPILE_DEFINITIONS "TEST_NDEBUG"
+ LABELS quick)
+
+dune_add_test(SOURCES autocopytest.cc
+ LABELS quick)
+
+dune_add_test(SOURCES bigunsignedinttest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES bitsetvectortest.cc
+ LABELS quick)
+
+dune_add_test(SOURCES boundscheckingtest.cc
+ COMPILE_DEFINITIONS DUNE_CHECK_BOUNDS=1
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+dune_add_test(SOURCES boundscheckingmvtest.cc
+ COMPILE_DEFINITIONS DUNE_CHECK_BOUNDS=1
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+dune_add_test(SOURCES boundscheckingoptest.cc
+ COMPILE_DEFINITIONS DUNE_CHECK_BOUNDS=1
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES calloncetest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES check_fvector_size.cc
+ LABELS quick)
+
+dune_add_test(NAME check_fvector_size_fail1
+ SOURCES check_fvector_size_fail.cc
+ COMPILE_DEFINITIONS DIM=1
+ EXPECT_COMPILE_FAIL
+ LABELS quick)
+
+dune_add_test(NAME check_fvector_size_fail2
+ SOURCES check_fvector_size_fail.cc
+ COMPILE_DEFINITIONS DIM=3
+ EXPECT_COMPILE_FAIL
+ LABELS quick)
+
+dune_add_test(NAME classnametest-demangled
+ SOURCES classnametest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(NAME classnametest-fallback
+ SOURCES classnametest.cc
+ LINK_LIBRARIES dunecommon
+ COMPILE_DEFINITIONS DISABLE_CXA_DEMANGLE
+ LABELS quick)
+
+dune_add_test(SOURCES concept.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES constexprifelsetest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES debugaligntest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+# Generate files with instantiations, external declarations, and the
+# corresponding invocations in the test for each instance.
+dune_instance_begin(FILES debugalignsimdtest.hh debugalignsimdtest.cc)
+foreach(SCALAR IN ITEMS double bool)
+ dune_instance_add(ID "${SCALAR}")
+ foreach(POINT IN ITEMS Type BinaryOpsScalarVector BinaryOpsVectorScalar)
+ dune_instance_add(TEMPLATE POINT
+ ID "${POINT}_${SCALAR}"
+ FILES debugalignsimd.cc)
+ endforeach()
+endforeach()
+dune_instance_end()
+list(FILTER DUNE_INSTANCE_GENERATED INCLUDE REGEX [[\.cc$]])
+dune_add_test(NAME debugalignsimdtest
+ SOURCES ${DUNE_INSTANCE_GENERATED}
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES densematrixassignmenttest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+dune_add_test(NAME densematrixassignmenttest_fail0
+ SOURCES densematrixassignmenttest.cc
+ LINK_LIBRARIES dunecommon
+ COMPILE_DEFINITIONS "FAILURE0"
+ EXPECT_COMPILE_FAIL
+ LABELS quick)
+dune_add_test(NAME densematrixassignmenttest_fail1
+ SOURCES densematrixassignmenttest.cc
+ LINK_LIBRARIES dunecommon
+ COMPILE_DEFINITIONS "FAILURE1"
+ EXPECT_COMPILE_FAIL
+ LABELS quick)
+dune_add_test(NAME densematrixassignmenttest_fail2
+ SOURCES densematrixassignmenttest.cc
+ LINK_LIBRARIES dunecommon
+ COMPILE_DEFINITIONS "FAILURE2"
+ EXPECT_COMPILE_FAIL
+ LABELS quick)
+dune_add_test(NAME densematrixassignmenttest_fail3
+ SOURCES densematrixassignmenttest.cc
+ LINK_LIBRARIES dunecommon
+ COMPILE_DEFINITIONS "FAILURE3"
+ EXPECT_COMPILE_FAIL
+ LABELS quick)
+dune_add_test(NAME densematrixassignmenttest_fail4
+ SOURCES densematrixassignmenttest.cc
+ LINK_LIBRARIES dunecommon
+ COMPILE_DEFINITIONS "FAILURE4"
+ EXPECT_COMPILE_FAIL
+ LABELS quick)
+dune_add_test(NAME densematrixassignmenttest_fail5
+ SOURCES densematrixassignmenttest.cc
+ LINK_LIBRARIES dunecommon
+ COMPILE_DEFINITIONS "FAILURE5"
+ EXPECT_COMPILE_FAIL
+ LABELS quick)
+dune_add_test(NAME densematrixassignmenttest_fail6
+ SOURCES densematrixassignmenttest.cc
+ LINK_LIBRARIES dunecommon
+ COMPILE_DEFINITIONS "FAILURE6"
+ EXPECT_COMPILE_FAIL
+ LABELS quick)
+
+dune_add_test(SOURCES densevectorassignmenttest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES diagonalmatrixtest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES dynmatrixtest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES dynvectortest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES densevectortest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES enumsettest.cc
+ LABELS quick)
+
+dune_add_test(SOURCES filledarraytest.cc
+ LABELS quick)
+
+dune_add_test(SOURCES fmatrixtest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+add_dune_vc_flags(fmatrixtest)
+
+dune_add_test(SOURCES functiontest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES fvectortest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES fvectorconversion1d.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES genericiterator_compile_fail.cc
+ EXPECT_COMPILE_FAIL
+ LABELS quick)
+
+dune_add_test(SOURCES hybridutilitiestest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES indicestest.cc
+ LABELS quick)
+
+dune_add_test(SOURCES iscallabletest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES iteratorfacadetest2.cc
+ LABELS quick)
+
+dune_add_test(SOURCES iteratorfacadetest.cc
+ LABELS quick)
+
+dune_add_test(SOURCES lrutest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES mathclassifierstest.cc
+ LINK_LIBRARIES dunecommon)
+
+dune_add_test(SOURCES metistest.cc
+ CMAKE_GUARD METIS_FOUND)
+add_dune_metis_flags(metistest)
+
+dune_add_test(SOURCES mpicommunicationtest.cc
+ LINK_LIBRARIES dunecommon
+ MPI_RANKS 1 2 4 8
+ TIMEOUT 300
+ LABELS quick)
+
+dune_add_test(SOURCES mpiguardtest.cc
+ LINK_LIBRARIES dunecommon
+ MPI_RANKS 1 2 4 8
+ TIMEOUT 300
+ LABELS quick)
+
+dune_add_test(SOURCES mpihelpertest.cc
+ LINK_LIBRARIES dunecommon
+ MPI_RANKS 1 2 4 8
+ TIMEOUT 300
+ LABELS quick)
+
+dune_add_test(NAME mpihelpertest2
+ SOURCES mpihelpertest.cc
+ COMPILE_DEFINITIONS MPIHELPER_PREINITIALIZE
+ LINK_LIBRARIES dunecommon
+ MPI_RANKS 1 2 4 8
+ TIMEOUT 300
+ LABELS quick)
+
+dune_add_test(SOURCES overloadsettest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(NAME parameterizedobjecttest
+ SOURCES parameterizedobjecttest.cc parameterizedobjectfactorysingleton.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES parametertreelocaletest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES parametertreetest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES parmetistest.cc
+ MPI_RANKS 3
+ TIMEOUT 300
+ CMAKE_GUARD ParMETIS_FOUND
+ LABELS quick)
+add_dune_parmetis_flags(parmetistest)
+
+dune_add_test(SOURCES pathtest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES poolallocatortest.cc
+ LABELS quick)
+
+dune_add_test(SOURCES powertest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES quadmathtest.cc
+ LINK_LIBRARIES dunecommon
+ CMAKE_GUARD HAVE_QUADMATH)
+
+dune_add_test(SOURCES rangeutilitiestest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES reservedvectortest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES scotchtest.cc
+ LINK_LIBRARIES dunecommon
+ CMAKE_GUARD PTScotch_FOUND
+ LABELS quick)
+
+dune_add_test(SOURCES shared_ptrtest.cc
+ LABELS quick)
+
+dune_add_test(SOURCES singletontest.cc
+ LABELS quick)
+
+dune_add_test(SOURCES sllisttest.cc
+ LABELS quick)
+
+dune_add_test(SOURCES stdidentity.cc
+ LABELS quick)
+
+dune_add_test(SOURCES stdapplytest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES streamoperatorstest.cc
+ LABELS quick)
+
+dune_add_test(SOURCES streamtest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES stringutilitytest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES testdebugallocator.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(NAME testdebugallocator_fail1
+ SOURCES testdebugallocator.cc
+ LINK_LIBRARIES dunecommon
+ COMPILE_DEFINITIONS "FAILURE1;EXPECTED_SIGNAL=SIGSEGV;EXPECTED_ALT_SIGNAL=SIGBUS"
+ EXPECT_FAIL
+ LABELS quick)
+
+dune_add_test(NAME testdebugallocator_fail2
+ SOURCES testdebugallocator.cc
+ LINK_LIBRARIES dunecommon
+ COMPILE_DEFINITIONS "FAILURE2;EXPECTED_SIGNAL=SIGABRT"
+ EXPECT_FAIL
+ LABELS quick)
+
+dune_add_test(NAME testdebugallocator_fail3
+ SOURCES testdebugallocator.cc
+ LINK_LIBRARIES dunecommon
+ COMPILE_DEFINITIONS "FAILURE3;EXPECTED_SIGNAL=SIGABRT"
+ EXPECT_FAIL
+ LABELS quick)
+
+dune_add_test(NAME testdebugallocator_fail4
+ SOURCES testdebugallocator.cc
+ LINK_LIBRARIES dunecommon
+ COMPILE_DEFINITIONS "FAILURE4;DEBUG_ALLOCATOR_KEEP=1;EXPECTED_SIGNAL=SIGSEGV;EXPECTED_ALT_SIGNAL=SIGBUS"
+ EXPECT_FAIL
+ LABELS quick)
+
+dune_add_test(NAME testdebugallocator_fail5
+ SOURCES testdebugallocator.cc
+ LINK_LIBRARIES dunecommon
+ COMPILE_DEFINITIONS "FAILURE5;EXPECTED_SIGNAL=SIGSEGV;EXPECTED_ALT_SIGNAL=SIGBUS"
+ EXPECT_FAIL
+ LABELS quick)
+
+dune_add_test(SOURCES testfloatcmp.cc
+ LABELS quick)
+
+dune_add_test(SOURCES transposetest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES tupleutilitytest.cc
+ LABELS quick)
+
+dune_add_test(SOURCES typeutilitytest.cc
+ LABELS quick)
+
+dune_add_test(SOURCES typelisttest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES utilitytest.cc
+ LABELS quick)
+
+dune_add_test(SOURCES eigenvaluestest.cc
+ LINK_LIBRARIES dunecommon
+ CMAKE_GUARD LAPACK_FOUND
+ LABELS quick)
+
+dune_add_test(SOURCES versiontest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES mathtest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick)
+
+dune_add_test(SOURCES vcexpectedimpltest.cc
+ LINK_LIBRARIES dunecommon
+ LABELS quick
+ CMAKE_GUARD Vc_FOUND)
+add_dune_vc_flags(vcexpectedimpltest)
+
+dune_add_test(SOURCES alignedallocatortest.cc
+ LINK_LIBRARIES dunecommon)
+
+install(
+ FILES
+ arithmetictestsuite.hh
+ collectorstream.hh
+ testsuite.hh
+ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dune/common/test)
--- /dev/null
+#include <config.h>
+
+#include <cstdint>
+#include <tuple>
+#include <vector>
+
+#include <dune/common/alignedallocator.hh>
+#include <dune/common/classname.hh>
+#include <dune/common/debugalign.hh>
+#include <dune/common/hybridutilities.hh>
+#include <dune/common/test/testsuite.hh>
+
+
+template<class T>
+void checkAlignment(Dune::TestSuite &test)
+{
+ std::vector<T, Dune::AlignedAllocator<T>> defaultalignment(4);
+
+ test.check(Dune::isAligned(defaultalignment.data(), std::alignment_of<T>::value), "defaultalignment isAligned")
+ << "alignment(" << std::alignment_of<T>::value << ") not detected for " << Dune::className<T>();
+
+ std::vector<T, Dune::AlignedAllocator<T,16>> alignment16(4);
+
+ test.check(Dune::isAligned(alignment16.data(), 16), "alignment16 isAligned")
+ << "alignment(16) not detected for " << Dune::className<T>();
+}
+
+int main(int argc, char **argv)
+{
+ Dune::TestSuite test;
+
+ using ArithmeticTypes = std::tuple<
+ char, signed char, unsigned char,
+ short, unsigned short,
+ int, unsigned,
+ long, long unsigned,
+ long long, long long unsigned,
+ wchar_t, char16_t, char32_t,
+ float, double, long double>;
+
+ Dune::Hybrid::forEach(ArithmeticTypes(), [&](auto val) {
+ using T = decltype(val);
+ checkAlignment<T>(test);
+ });
+
+ return test.exit();
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_TEST_ARITHMETICTESTSUITE_HH
+#define DUNE_COMMON_TEST_ARITHMETICTESTSUITE_HH
+
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include <dune/common/classname.hh>
+#include <dune/common/test/testsuite.hh>
+
+namespace Dune {
+
+/*
+ * silence warnings from GCC about using integer operands on a bool
+ * (when instantiated for T=bool)
+ */
+#if __GNUC__ >= 7
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wbool-operation"
+# pragma GCC diagnostic ignored "-Wint-in-bool-context"
+#endif
+
+ //! Test suite for arithmetic types
+ /**
+ * You usually want to call the member function `checkArithmetic()`. The
+ * individual component tests are however available for special needs.
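+   *
+   * A minimal usage sketch, mirroring the accompanying test driver:
+   * \code
+   * Dune::ArithmeticTestSuite test;
+   * test.checkArithmetic<int, int>();
+   * return test.exit();
+   * \endcode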
+ */
+ class ArithmeticTestSuite :
+ public TestSuite
+ {
+ public:
+ // inherit all constructors from TestSuite
+ using TestSuite::TestSuite;
+
+ //! tag denoting any arithmetic type
+ struct Arithmetic {};
+ //! tag denoting integral types
+ struct Integral : Arithmetic {};
+ //! tag denoting boolean types
+ struct Boolean : Integral {};
+ //! tag denoting signed integral types
+ struct Signed : Integral {};
+ //! tag denoting unsigned integral types
+ struct Unsigned : Integral {};
+ //! tag denoting floating point types
+ struct Floating : Arithmetic {};
+
+ private:
+ static const char *name(Arithmetic) { return "Arithmetic"; }
+ static const char *name(Integral ) { return "Integral"; }
+ static const char *name(Boolean ) { return "Boolean"; }
+ static const char *name(Signed ) { return "Signed"; }
+ static const char *name(Unsigned ) { return "Unsigned"; }
+ static const char *name(Floating ) { return "Floating"; }
+
+ template<class C, class Then, class Else = void>
+ using Cond = typename std::conditional<C::value, Then, Else>::type;
+
+ public:
+ //! determine arithmetic tag for the given type
+ /**
+     * `T` can either be one of the fundamental arithmetic types, in which
+     * case a default-constructed tag object for that type is returned, or a
+     * class derived from `Arithmetic`, in which case a default-constructed
+     * object of that class is returned.
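+     *
+     * For illustration, a sketch of the tags this resolves to:
+     * \code
+     * auto t1 = ArithmeticTestSuite::tag<unsigned>();  // Unsigned{}
+     * auto t2 = ArithmeticTestSuite::tag<bool>();      // Boolean{}
+     * auto t3 = ArithmeticTestSuite::tag<double>();    // Floating{}
+     * \endcode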
+ */
+ template<class T>
+ constexpr static auto tag(T = T{})
+ {
+ return
+ Cond<std::is_convertible<T, Arithmetic>, T,
+ Cond<std::is_floating_point<T>, Floating,
+ Cond<std::is_integral<T>,
+ Cond<std::is_same<bool, T>, Boolean,
+ Cond<std::is_signed<T>, Signed,
+ Cond<std::is_unsigned<T>, Unsigned
+ > > >
+ > > >{};
+ }
+
+#define DUNE_TEST_FUNCTION(T, tag) \
+ static const auto function = \
+ std::string(__func__) + "<" + className<T>() + ">(" + name(tag) + ")"
+
+#define DUNE_TEST_CHECK(expr) \
+ (check((expr), function) \
+ << __FILE__ << ":" << __LINE__ << ": Check \"" << #expr << "\"")
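+
+// A brief orientation (descriptive only): DUNE_TEST_FUNCTION builds a context
+// string such as "checkEqual<int>(Signed)"; DUNE_TEST_CHECK passes the result
+// of the expression to TestSuite::check() under that context and appends
+// file, line, and the expression text to the failure message.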
+
+ //
+ // check basic operations: construct, copy, compare
+ //
+
+ //! check the default constructors
+ template<class T>
+ void checkDefaultConstruct([[maybe_unused]] Arithmetic arithmetic_tag)
+ {
+ [[maybe_unused]] T t0;
+ (void)T();
+ [[maybe_unused]] T t1{};
+ [[maybe_unused]] T t2 = {};
+ }
+
+ //! check explicit conversion from and to int
+ template<class T>
+ void checkExplicitIntConvert(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+      // This test may also be applied to boolean-like types; 0 and 1 are the
+      // only values that survive the conversion round trip.
+ T t0(0); DUNE_TEST_CHECK(int(t0) == 0);
+ T t1(1); DUNE_TEST_CHECK(int(t1) == 1);
+ }
+
+ //! check the move constructor
+ template<class T>
+ void checkMoveConstruct(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+ for(int i : { 0, 1 })
+ {
+ T t0(i);
+
+ T t1(std::move(t0));
+ T t2 = std::move(t1);
+ T t3{ std::move(t2) };
+ T t4 = { std::move(t3) };
+
+ DUNE_TEST_CHECK(bool(t4 == T(i)));
+ }
+ }
+
+ //! check the copy constructor
+ template<class T>
+ void checkCopyConstruct(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+ for(int i : { 0, 1 })
+ {
+ T t0(i);
+
+ T t1(t0);
+ T t2 = t1;
+ T t3{ t2 };
+ T t4 = { t3 };
+
+ DUNE_TEST_CHECK(bool(t0 == T(i)));
+ DUNE_TEST_CHECK(bool(t1 == T(i)));
+ DUNE_TEST_CHECK(bool(t2 == T(i)));
+ DUNE_TEST_CHECK(bool(t3 == T(i)));
+ DUNE_TEST_CHECK(bool(t4 == T(i)));
+ }
+ }
+
+ //! check the move assignment operator
+ template<class T>
+ void checkMoveAssign(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+ for(int i : { 0, 1 })
+ {
+ T t0(i);
+ T t2, t4;
+
+ t2 = std::move(t0);
+ t4 = { std::move(t2) };
+
+ DUNE_TEST_CHECK(bool(t4 == T(i)));
+ }
+ }
+
+ //! check the copy assignment operator
+ template<class T>
+ void checkCopyAssign(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+ for(int i : { 0, 1 })
+ {
+ T t0(i);
+ T t2, t4;
+
+ t2 = t0;
+ t4 = { t2 };
+
+ DUNE_TEST_CHECK(bool(t0 == T(i)));
+ DUNE_TEST_CHECK(bool(t2 == T(i)));
+ DUNE_TEST_CHECK(bool(t4 == T(i)));
+ }
+ }
+
+ //! check `==` and `!=`
+ /**
+ * \note We do not require the result to be _implicitly_ convertible to
+ * bool, but it must be contextually convertible to bool.
+ */
+ template<class T>
+ void checkEqual(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+ T t0(0);
+ T t1(1);
+
+ DUNE_TEST_CHECK(bool(t0 == T(0)));
+ DUNE_TEST_CHECK(bool(t1 == T(1)));
+
+ DUNE_TEST_CHECK(!bool(t0 == T(1)));
+ DUNE_TEST_CHECK(!bool(t1 == T(0)));
+ DUNE_TEST_CHECK(!bool(t0 == t1));
+
+ DUNE_TEST_CHECK(!bool(t0 != T(0)));
+ DUNE_TEST_CHECK(!bool(t1 != T(1)));
+
+ DUNE_TEST_CHECK(bool(t0 != T(1)));
+ DUNE_TEST_CHECK(bool(t1 != T(0)));
+ DUNE_TEST_CHECK(bool(t0 != t1));
+ }
+
+ //
+ // checks for unary operators
+ //
+
+ //! check postfix `++`
+ /**
+     * Applies to integral and floating point; the boolean overload below is
+     * a no-op, since `++` on bool is deprecated.
+ */
+ template<class T>
+ void checkPostfixInc(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ T t0(0);
+ DUNE_TEST_CHECK(bool(T(t0++) == T(0)));
+ DUNE_TEST_CHECK(bool(t0 == T(1)));
+ }
+ template<class T>
+ void checkPostfixInc(Boolean) {}
+
+ //! check postfix `--`
+ /**
+     * Applies to integral (but not boolean) and floating point.
+ */
+ template<class T>
+ void checkPostfixDec(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ T t1(1);
+ DUNE_TEST_CHECK(bool(T(t1--) == T(1)));
+ DUNE_TEST_CHECK(bool(t1 == T(0)));
+ }
+ template<class T>
+ void checkPostfixDec(Boolean) {}
+
+ //! check prefix `+`
+ /**
+ * Applies to boolean, integral, and floating point.
+ */
+ template<class T>
+ void checkPrefixPlus(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ DUNE_TEST_CHECK(bool(T(+T(0)) == T(0)));
+ DUNE_TEST_CHECK(bool(T(+T(1)) == T(1)));
+ }
+
+ //! check prefix `-`
+ /**
+ * Applies to boolean, integral, and floating point.
+ */
+ template<class T>
+ void checkPrefixMinus(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ DUNE_TEST_CHECK(bool(T(-T(0)) == T( 0)));
+ DUNE_TEST_CHECK(bool(T(-T(1)) == T(-1)));
+ }
+
+ //! check prefix `!`
+ /**
+ * Applies to boolean, integral, and floating point.
+ */
+ template<class T>
+ void checkPrefixNot(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ DUNE_TEST_CHECK(bool(!T(0)));
+ DUNE_TEST_CHECK(!bool(!T(1)));
+ }
+
+ //! check prefix `~`
+ /**
+ * Applies to boolean and integral.
+ */
+ template<class T>
+ void checkPrefixBitNot(Boolean arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ DUNE_TEST_CHECK(bool(T(~T(0))));
+ }
+ template<class T>
+ void checkPrefixBitNot(Integral arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ DUNE_TEST_CHECK(bool(T(~T(0))));
+ DUNE_TEST_CHECK(bool(T(~T(1))));
+
+ DUNE_TEST_CHECK(bool(T(~T(~T(0))) == T(0)));
+ DUNE_TEST_CHECK(bool(T(~T(~T(1))) == T(1)));
+ }
+ template<class T>
+ void checkPrefixBitNot(Unsigned arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ checkPrefixBitNot<T>(Integral{});
+
+ DUNE_TEST_CHECK(bool(T(~T(0)) == T(-1)));
+ DUNE_TEST_CHECK(bool(T(~T(1)) == T(-2)));
+ }
+ template<class T>
+ void checkPrefixBitNot(Floating) {}
+
+    //! check prefix `++`
+    /**
+     * Applies to integral and floating point; skipped for boolean, where
+     * `++` is deprecated.
+     */
+ template<class T>
+ void checkPrefixInc(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ T t0(0);
+ DUNE_TEST_CHECK(bool(T(++t0) == T(1)));
+ DUNE_TEST_CHECK(bool(t0 == T(1)));
+ ++t0 = T(0);
+ DUNE_TEST_CHECK(bool(t0 == T(0)));
+ }
+ template<class T>
+ void checkPrefixInc(Boolean) {}
+
+    //! check prefix `--`
+    /**
+     * Applies to integral (but not boolean) and floating point.
+     */
+ template<class T>
+ void checkPrefixDec(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ T t1(1);
+ DUNE_TEST_CHECK(bool(T(--t1) == T(0)));
+ DUNE_TEST_CHECK(bool(t1 == T(0)));
+ t1 = T(1);
+ --t1 = T(1);
+ DUNE_TEST_CHECK(bool(t1 == T(1)));
+ }
+ template<class T>
+ void checkPrefixDec(Boolean) {}
+
+ //
+    // checks for infix operators
+ //
+
+ //! check infix `*`
+ /**
+ * Applies to boolean, integral, and floating point.
+ */
+ template<class T>
+ void checkInfixMul(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ DUNE_TEST_CHECK(bool(T(T(0)*T(0)) == T(0)));
+ DUNE_TEST_CHECK(bool(T(T(1)*T(0)) == T(0)));
+ DUNE_TEST_CHECK(bool(T(T(0)*T(1)) == T(0)));
+ DUNE_TEST_CHECK(bool(T(T(1)*T(1)) == T(1)));
+ }
+
+ //! check infix `/`
+ /**
+ * Applies to boolean, integral, and floating point.
+ */
+ template<class T>
+ void checkInfixDiv(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ DUNE_TEST_CHECK(bool(T(T(0)/T(1)) == T(0)));
+ DUNE_TEST_CHECK(bool(T(T(1)/T(1)) == T(1)));
+ }
+
+ //! check infix `%`
+ /**
+ * Applies to boolean and integral.
+ */
+ template<class T>
+ void checkInfixRem(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ DUNE_TEST_CHECK(bool(T(T(0)%T(1)) == T(0)));
+ DUNE_TEST_CHECK(bool(T(T(1)%T(1)) == T(0)));
+ }
+ template<class T>
+ void checkInfixRem(Floating) {}
+
+ //! check infix `+`
+ /**
+ * Applies to boolean, integral, and floating point.
+ */
+ template<class T>
+ void checkInfixPlus(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ DUNE_TEST_CHECK(bool(T(T(0)+T(0)) == T(0)));
+ DUNE_TEST_CHECK(bool(T(T(1)+T(0)) == T(1)));
+ DUNE_TEST_CHECK(bool(T(T(0)+T(1)) == T(1)));
+ DUNE_TEST_CHECK(bool(T(T(1)+T(1)) == T(2)));
+ }
+
+ //! check infix `-`
+ /**
+ * Applies to boolean, integral, and floating point.
+ */
+ template<class T>
+ void checkInfixMinus(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ DUNE_TEST_CHECK(bool(T(T(0)-T(0)) == T( 0)));
+ DUNE_TEST_CHECK(bool(T(T(1)-T(0)) == T( 1)));
+ DUNE_TEST_CHECK(bool(T(T(0)-T(1)) == T(-1)));
+ DUNE_TEST_CHECK(bool(T(T(1)-T(1)) == T( 0)));
+ }
+
+ //! check infix `<<`
+ /**
+ * Applies to boolean and integral.
+ */
+ template<class T>
+ void checkInfixLShift(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ DUNE_TEST_CHECK(bool(T(T(0)<<T(0)) == T(0)));
+ DUNE_TEST_CHECK(bool(T(T(1)<<T(0)) == T(1)));
+ DUNE_TEST_CHECK(bool(T(T(0)<<T(1)) == T(0)));
+ DUNE_TEST_CHECK(bool(T(T(1)<<T(1)) == T(2)));
+ }
+ template<class T>
+ void checkInfixLShift(Floating) {}
+
+ //! check infix `>>`
+ /**
+ * Applies to boolean and integral.
+ */
+ template<class T>
+ void checkInfixRShift(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ DUNE_TEST_CHECK(bool(T(T(0)>>T(0)) == T(0)));
+ DUNE_TEST_CHECK(bool(T(T(1)>>T(0)) == T(1)));
+ DUNE_TEST_CHECK(bool(T(T(0)>>T(1)) == T(0)));
+ DUNE_TEST_CHECK(bool(T(T(1)>>T(1)) == T(0)));
+ }
+ template<class T>
+ void checkInfixRShift(Floating) {}
+
+ //! check infix `<`
+ /**
+ * Applies to boolean, integral, and floating point.
+ */
+ template<class T>
+ void checkInfixLess(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ DUNE_TEST_CHECK(bool(T(0)<T(0)) == false);
+ DUNE_TEST_CHECK(bool(T(1)<T(0)) == false);
+ DUNE_TEST_CHECK(bool(T(0)<T(1)) == true );
+ DUNE_TEST_CHECK(bool(T(1)<T(1)) == false);
+ }
+ template<class T>
+ void checkInfixLess(Signed arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ checkInfixLess<T>(Integral{});
+
+ DUNE_TEST_CHECK(bool(T(-1)<T( 0)) == true);
+ }
+ template<class T>
+ void checkInfixLess(Unsigned arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ checkInfixLess<T>(Integral{});
+
+ DUNE_TEST_CHECK(bool(T(-1)<T( 0)) == false);
+ }
+
+ //! check infix `>`
+ /**
+ * Applies to boolean, integral, and floating point.
+ */
+ template<class T>
+ void checkInfixGreater(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ int values[] = { -1, 0, 1 };
+ for(int i : values)
+ for(int j : values)
+ DUNE_TEST_CHECK(bool(T(i) > T(j)) == bool(T(j) < T(i)));
+ }
+
+ //! check infix `<=`
+ /**
+ * Applies to boolean, integral, and floating point.
+ */
+ template<class T>
+ void checkInfixLessEqual(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ int values[] = { -1, 0, 1 };
+ for(int i : values)
+ for(int j : values)
+ DUNE_TEST_CHECK(bool(T(i) <= T(j)) != bool(T(j) < T(i)));
+ }
+
+ //! check infix `>=`
+ /**
+ * Applies to boolean, integral, and floating point.
+ */
+ template<class T>
+ void checkInfixGreaterEqual(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ int values[] = { -1, 0, 1 };
+ for(int i : values)
+ for(int j : values)
+ DUNE_TEST_CHECK(bool(T(i) >= T(j)) != bool(T(i) < T(j)));
+ }
+
+ //! check infix `&`
+ /**
+ * Applies to boolean and integral.
+ */
+ template<class T>
+ void checkInfixBitAnd(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ for(int i = 0; i < 4; ++i)
+ for(int j = 0; j < 4; ++j)
+ DUNE_TEST_CHECK(bool(T(T(i) & T(j)) == T(i&j)));
+ }
+ template<class T>
+ void checkInfixBitAnd(Boolean arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ for(int i = 0; i < 2; ++i)
+ for(int j = 0; j < 2; ++j)
+ DUNE_TEST_CHECK(bool(T(T(i) & T(j)) == T(i&j)));
+ }
+ template<class T>
+ void checkInfixBitAnd(Floating) {}
+
+ //! check infix `^`
+ /**
+ * Applies to boolean and integral.
+ */
+ template<class T>
+ void checkInfixBitXor(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ for(int i = 0; i < 4; ++i)
+ for(int j = 0; j < 4; ++j)
+ DUNE_TEST_CHECK(bool(T(T(i) ^ T(j)) == T(i^j)));
+ }
+ template<class T>
+ void checkInfixBitXor(Boolean arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ for(int i = 0; i < 2; ++i)
+ for(int j = 0; j < 2; ++j)
+ // compare the bit-flipped versions so we don't depend on the number
+ // of bits in T.
+ DUNE_TEST_CHECK(bool(T(~T(T(i) ^ T(j))) == T(~(i^j))));
+ }
+ template<class T>
+ void checkInfixBitXor(Floating) {}
+
+ //! check infix `|`
+ /**
+ * Applies to boolean and integral.
+ */
+ template<class T>
+ void checkInfixBitOr(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ for(int i = 0; i < 4; ++i)
+ for(int j = 0; j < 4; ++j)
+ DUNE_TEST_CHECK(bool(T(T(i) | T(j)) == T(i|j)));
+ }
+ template<class T>
+ void checkInfixBitOr(Boolean arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ for(int i = 0; i < 2; ++i)
+ for(int j = 0; j < 2; ++j)
+ DUNE_TEST_CHECK(bool(T(T(i) | T(j)) == T(i|j)));
+ }
+ template<class T>
+ void checkInfixBitOr(Floating) {}
+
+ //! check infix `&&`
+ /**
+ * Applies to boolean, integral and floating point.
+ */
+ template<class T>
+ void checkInfixAnd(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ for(int i = 0; i < 4; ++i)
+ for(int j = 0; j < 4; ++j)
+ DUNE_TEST_CHECK(bool(T(i) && T(j)) == (i && j));
+ }
+
+ //! check infix `||`
+ /**
+ * Applies to boolean, integral and floating point.
+ */
+ template<class T>
+ void checkInfixOr(Arithmetic arithmetic_tag)
+ {
+ DUNE_TEST_FUNCTION(T, arithmetic_tag);
+
+ for(int i = 0; i < 4; ++i)
+ for(int j = 0; j < 4; ++j)
+ DUNE_TEST_CHECK(bool(T(i) || T(j)) == (i || j));
+ }
+
+ //
+ // checks for compound assignment operators
+ //
+
+#define DUNE_TEST_PEEL(...) __VA_ARGS__
+#define DUNE_TEST_ASSIGN(OP, name, Tag, lrange, rrange) \
+ template<class T> \
+ void checkAssign##name(Tag arithmetic_tag) \
+ { \
+ DUNE_TEST_FUNCTION(T, arithmetic_tag); \
+ \
+ for(int i : { DUNE_TEST_PEEL lrange }) \
+ for(int j : { DUNE_TEST_PEEL rrange }) \
+ { \
+ T t(i); \
+ DUNE_TEST_CHECK(bool((t OP##= T(j)) == T(T(i) OP T(j)))); \
+ DUNE_TEST_CHECK(bool(t == T(T(i) OP T(j)))); \
+ } \
+ }
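+
+// For illustration, DUNE_TEST_ASSIGN(+, Plus, Arithmetic, (0, 1), (0, 1))
+// expands to roughly the following sketch (with DUNE_TEST_PEEL already
+// applied to the value lists):
+//
+//   template<class T>
+//   void checkAssignPlus(Arithmetic arithmetic_tag)
+//   {
+//     DUNE_TEST_FUNCTION(T, arithmetic_tag);
+//     for(int i : { 0, 1 })
+//       for(int j : { 0, 1 })
+//       {
+//         T t(i);
+//         DUNE_TEST_CHECK(bool((t += T(j)) == T(T(i) + T(j))));
+//         DUNE_TEST_CHECK(bool(t == T(T(i) + T(j))));
+//       }
+//   }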
+
+#define DUNE_TEST_ASSIGN_DISABLE(name, Tag) \
+ template<class T> \
+ void checkAssign##name(Tag) {}
+
+ DUNE_TEST_ASSIGN(*, Mul, Arithmetic, (0, 1, 2, 3), (0, 1, 2, 3))
+ DUNE_TEST_ASSIGN(/, Div, Arithmetic, (0, 1, 2, 3), ( 1, 2, 4))
+ DUNE_TEST_ASSIGN(%, Rem, Arithmetic, (0, 1, 2, 3), ( 1, 2, 3))
+ DUNE_TEST_ASSIGN_DISABLE(Rem, Floating)
+
+ DUNE_TEST_ASSIGN(+, Plus, Arithmetic, (0, 1, 2, 3), (0, 1, 2, 3))
+ DUNE_TEST_ASSIGN(-, Minus, Arithmetic, (0, 1, 2, 3), (0, 1, 2, 3))
+
+ DUNE_TEST_ASSIGN(<<, LShift, Integral, (0, 1, 2, 3), (0, 1, 2, 3))
+ DUNE_TEST_ASSIGN(>>, RShift, Integral, (0, 1, 2, 3), (0, 1, 2, 3))
+ DUNE_TEST_ASSIGN(<<, LShift, Boolean, (0, 1 ), (0, 1 ))
+ DUNE_TEST_ASSIGN(>>, RShift, Boolean, (0, 1 ), (0, 1 ))
+ DUNE_TEST_ASSIGN_DISABLE(LShift, Floating)
+ DUNE_TEST_ASSIGN_DISABLE(RShift, Floating)
+
+ DUNE_TEST_ASSIGN(&, BitAnd, Integral, (0, 1, 2, 3), (0, 1, 2, 3))
+ DUNE_TEST_ASSIGN(^, BitXor, Integral, (0, 1, 2, 3), (0, 1, 2, 3))
+ DUNE_TEST_ASSIGN(|, BitOr, Integral, (0, 1, 2, 3), (0, 1, 2, 3))
+ DUNE_TEST_ASSIGN_DISABLE(BitAnd, Floating)
+ DUNE_TEST_ASSIGN_DISABLE(BitXor, Floating)
+ DUNE_TEST_ASSIGN_DISABLE(BitOr, Floating)
+
+#undef DUNE_TEST_ASSIGN_DISABLE
+#undef DUNE_TEST_ASSIGN
+#undef DUNE_TEST_PEEL
+#undef DUNE_TEST_FUNCTION
+#undef DUNE_TEST_CHECK
+
+ //
+    // collected checks
+ //
+
+ //! run the full arithmetic type testsuite
+ /**
+ * `T` is the type to check. `Tag` determines the arithmetic category; it
+     * is `Arithmetic` or one of the classes derived from it. Alternatively, `Tag`
+ * may be one of the fundamental arithmetic types, in which case it will
+ * be converted to the appropriate category tag automatically.
+ *
+ * To check an extended unsigned integer type, you might use one of the
+ * following calls:
+ * \code
+ * test.checkArithmetic<MyExtUnsigned, unsigned>();
+ * test.checkArithmetic<MyExtUnsigned>(0u);
+     * test.checkArithmetic<MyExtUnsigned, ArithmeticTestSuite::Unsigned>();
+     * test.checkArithmetic<MyExtUnsigned>(ArithmeticTestSuite::Unsigned{});
+ * \endcode
+ */
+ template<class T, class Tag>
+ void checkArithmetic(Tag = Tag{})
+ {
+ auto arithmetic_tag = this->tag<Tag>();
+
+ checkDefaultConstruct<T>(arithmetic_tag);
+ checkExplicitIntConvert<T>(arithmetic_tag);
+ checkMoveConstruct<T>(arithmetic_tag);
+ checkCopyConstruct<T>(arithmetic_tag);
+ checkMoveAssign<T>(arithmetic_tag);
+ checkCopyAssign<T>(arithmetic_tag);
+ checkEqual<T>(arithmetic_tag);
+
+ checkPostfixInc<T>(arithmetic_tag);
+ checkPostfixDec<T>(arithmetic_tag);
+
+ checkPrefixPlus<T>(arithmetic_tag);
+ checkPrefixMinus<T>(arithmetic_tag);
+ checkPrefixNot<T>(arithmetic_tag);
+ checkPrefixBitNot<T>(arithmetic_tag);
+
+ checkPrefixInc<T>(arithmetic_tag);
+ checkPrefixDec<T>(arithmetic_tag);
+
+ checkInfixMul<T>(arithmetic_tag);
+ checkInfixDiv<T>(arithmetic_tag);
+ checkInfixRem<T>(arithmetic_tag);
+
+ checkInfixPlus<T>(arithmetic_tag);
+ checkInfixMinus<T>(arithmetic_tag);
+
+ checkInfixLShift<T>(arithmetic_tag);
+ checkInfixRShift<T>(arithmetic_tag);
+
+ checkInfixLess<T>(arithmetic_tag);
+ checkInfixGreater<T>(arithmetic_tag);
+ checkInfixLessEqual<T>(arithmetic_tag);
+ checkInfixGreaterEqual<T>(arithmetic_tag);
+
+ checkInfixBitAnd<T>(arithmetic_tag);
+ checkInfixBitXor<T>(arithmetic_tag);
+ checkInfixBitOr<T>(arithmetic_tag);
+
+ checkInfixAnd<T>(arithmetic_tag);
+ checkInfixOr<T>(arithmetic_tag);
+
+ checkAssignMul<T>(arithmetic_tag);
+ checkAssignDiv<T>(arithmetic_tag);
+ checkAssignRem<T>(arithmetic_tag);
+
+ checkAssignPlus<T>(arithmetic_tag);
+ checkAssignMinus<T>(arithmetic_tag);
+
+ checkAssignLShift<T>(arithmetic_tag);
+ checkAssignRShift<T>(arithmetic_tag);
+
+ checkAssignBitAnd<T>(arithmetic_tag);
+ checkAssignBitXor<T>(arithmetic_tag);
+ checkAssignBitOr<T>(arithmetic_tag);
+ }
+ };
+
+#if __GNUC__ >= 7
+# pragma GCC diagnostic pop
+#endif
+
+} // namespace Dune
+
+#endif // DUNE_COMMON_TEST_ARITHMETICTESTSUITE_HH
--- /dev/null
+#include <config.h>
+
+#include <tuple>
+
+#include <dune/common/hybridutilities.hh>
+#include <dune/common/parallel/mpihelper.hh>
+#include <dune/common/test/arithmetictestsuite.hh>
+
+int main(int argc, char **argv)
+{
+ Dune::MPIHelper::instance(argc, argv);
+
+ Dune::ArithmeticTestSuite test;
+
+ using ArithmeticTypes = std::tuple<
+ bool,
+ char, signed char, unsigned char,
+ short, unsigned short,
+ int, unsigned,
+ long, long unsigned,
+ long long, long long unsigned,
+ wchar_t, char16_t, char32_t,
+ float, double, long double>;
+
+ Dune::Hybrid::forEach(ArithmeticTypes(), [&](auto val) {
+ using T = decltype(val);
+ test.checkArithmetic<T, T>();
+ });
+
+ return test.exit();
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <dune/common/arraylist.hh>
+#include <dune/common/test/iteratortest.hh>
+#include <iostream>
+#include <cstdlib>
+#include <algorithm>
+
+class Double {
+public:
+ double val;
+ Double() : val(0){}
+ Double(double d) : val(d){}
+ Double& operator=(double d){
+ val=d;
+ return *this;
+ }
+};
+
+bool operator<(Double a, Double b){
+ return a.val<b.val;
+}
+
+template<class T, int size>
+void randomizeList(Dune::ArrayList<T,size>& alist){
+ using namespace Dune;
+
+ srand(300);
+
+ int lowest=0, highest=1000, range=(highest-lowest)+1;
+
+ for(int i=0; i < 250; i++)
+ alist.push_back(T(static_cast<int>(range*(rand()/(RAND_MAX+1.0)))));
+}
+
+int testSorting(){
+ using namespace Dune;
+ ArrayList<double,10> alist;
+
+ randomizeList(alist);
+ std::sort(alist.begin(), alist.end());
+ double last=-1;
+
+ for(ArrayList<double,10>::iterator iter=alist.begin(), end=alist.end();
+ iter != end; ++iter) {
+ if((*iter)>=last) {
+ last=*iter;
+ }else{
+ std::cerr << last<<">"<<(*iter)<<" List is not sorted! "<<__FILE__ <<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+template<int size>
+void initConsecutive(Dune::ArrayList<double,size>& alist){
+ using namespace Dune;
+
+ for(int i=0; i < 100; i++)
+ alist.push_back(i);
+}
+
+int testIteratorRemove(){
+ using namespace Dune;
+ ArrayList<double,10> alist;
+ initConsecutive(alist);
+ ArrayList<double,10>::iterator iter=alist.begin();
+
+ iter+=8;
+
+ iter.eraseToHere();
+ ++iter;
+
+ if((*iter)!=10) {
+ std::cerr<<"Removing from iterator failed! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+
+ iter = alist.begin();
+
+ if((*iter)!=9) {
+ std::cerr<<"Removing from iterator failed! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+
+ iter +=3;
+ iter.eraseToHere();
+ iter +=4;
+
+ if((*iter)!=17) {
+ std::cerr<<"Removing from iterator failed! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+
+ alist.purge();
+ if(*(alist.begin())!=13) {
+ std::cerr<<"Purging iterator failed! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+ return 0;
+}
+int testRandomAccess(){
+ using namespace Dune;
+ ArrayList<double,10> alist;
+ initConsecutive(alist);
+
+ ArrayList<double,10>::iterator iter=alist.begin();
+
+
+ for(int i=0; i < 100; i++) {
+ if(iter[i]!=i) {
+ std::cerr << "Random Access failed: "<<iter[i]<<"!="<<i<<" "<< __FILE__ <<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+
+ if(*(iter+i)!=i) {
+ std::cerr << "Random Access failed "<< __FILE__ <<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int testComparison(){
+ using namespace Dune;
+ ArrayList<double,10> alist;
+ initConsecutive(alist);
+
+ ArrayList<double,10>::iterator iter=alist.begin(), iter1=alist.begin();
+ iter1=iter+5;
+ iter1=iter-5;
+ iter1=iter+5;
+
+
+ if(!(iter<iter1)) {
+    std::cerr<<*iter<<">="<<*iter1<<" operator< seems to be wrong! "<< __FILE__ <<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+
+ if(!(iter1>iter)) {
+    std::cerr<<"operator> seems to be wrong! "<< __FILE__ <<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+
+ if(!(iter<=iter1)) {
+    std::cerr<<"operator<= seems to be wrong! "<< __FILE__ <<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+
+ if(!(iter1>=iter)) {
+    std::cerr<<"operator>= seems to be wrong! "<< __FILE__ <<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+
+ if(!(iter1 != iter)) {
+    std::cerr<<"operator!= seems to be wrong! "<< __FILE__ <<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+
+ if(!(iter1 == iter+5)) {
+    std::cerr<<"operator== seems to be wrong! "<< __FILE__ <<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+ return 0;
+}
+
+
+int main(){
+ using namespace Dune;
+ using namespace std;
+ ArrayList<double,100> alist;
+
+ randomizeList(alist);
+
+ int ret=testIterator(alist);
+
+ if(0!=testComparison()) {
+ ret++;
+ cerr<< "Comparison failed!"<<endl;
+ }
+
+ if(0!=testRandomAccess()) {
+ ret++;
+ cerr<< "Random Access failed!"<<endl;
+ }
+
+ if(0!=testSorting()) {
+ ret++;
+ cerr<< "Sorting failed!"<<endl;
+ }
+
+ if(0!=testIteratorRemove()) {
+ ret++;
+ cerr<< "Erasing failed!"<<endl;
+ }
+ return ret;
+
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <array>
+#include <iostream>
+
+#include <dune/common/std/make_array.hh>
+#include <dune/common/classname.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/streamoperators.hh>
+
+template<class T, std::size_t n>
+void f(const std::array<T, n> &a)
+{
+ using Dune::operator<<;
+ std::cout << "Got a " << Dune::className(a) << " with elements " << a << std::endl;
+}
+
+int main() {
+ // check that make_array works
+ f(Dune::Std::make_array(1, 2));
+ f(Dune::Std::make_array(1, 2, 3));
+ f(Dune::Std::make_array(1, 2, 3, 4));
+ f(Dune::Std::make_array(1, 2, 3, 4, 5));
+ f(Dune::Std::make_array(1, 2, 3, 4, 5, 6));
+ f(Dune::Std::make_array(1, 2, 3, 4, 5, 6, 7));
+ f(Dune::Std::make_array(1, 2, 3, 4, 5, 6, 7, 8));
+ f(Dune::Std::make_array(1, 2, 3, 4, 5, 6, 7, 8, 9));
+ f(Dune::Std::make_array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+
+ Dune::FieldVector<double, 2> x(0);
+ f(Dune::Std::make_array(x, x));
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include <config.h>
+
+#ifdef NDEBUG
+ #undef NDEBUG
+#endif
+#ifdef TEST_NDEBUG
+ #define NDEBUG TEST_NDEBUG
+#endif
+
+#include <cassert>
+#include <dune/common/assertandreturn.hh>
+#include <dune/common/exceptions.hh>
+#include <dune/common/parallel/mpihelper.hh>
+
+struct Foo
+{
+ static constexpr auto lessAndReturn([[maybe_unused]] int a, [[maybe_unused]] int b, int x)
+ {
+ return DUNE_ASSERT_AND_RETURN(a<b, x);
+ }
+};
+
+
+int main ( int argc, char **argv )
+try
+{
+ using namespace Dune;
+
+ MPIHelper::instance(argc, argv);
+
+ // This should not fail since 0<2
+ if (Foo::lessAndReturn(0,2,3) != 3)
+ DUNE_THROW(Dune::Exception, "DUNE_ASSERT_AND_RETURN returned incorrect value in dynamic context");
+
+ // This should not fail since 0<2
+ if (std::integral_constant<int, Foo::lessAndReturn(0,2,3)>::value != 3)
+ DUNE_THROW(Dune::Exception, "DUNE_ASSERT_AND_RETURN returned incorrect value in constexpr context");
+
+// If EXPECT_FAIL would work with failing assertions,
+// we could test if the assertion is triggered with
+// a target
+//
+// dune_add_test(NAME assertandreturntest_runtime_fail
+// SOURCES assertandreturntest.cc
+// LINK_LIBRARIES dunecommon
+// COMPILE_DEFINITIONS "TEST_RUNTIME_FAIL"
+// EXPECT_FAIL
+// LABELS quick)
+//
+// and the following code:
+#ifdef TEST_RUNTIME_FAIL
+  // This should fail at runtime because the asserted condition 0 < -1 is false
+ if (Foo::lessAndReturn(0,-1,3) != 3)
+ DUNE_THROW(Dune::Exception, "DUNE_ASSERT_AND_RETURN returned incorrect value in dynamic context");
+#endif
+
+#ifdef TEST_COMPILETIME_FAIL
+  // This should fail at compile time because the asserted condition 0 < -1 is false
+ if (std::integral_constant<int, Foo::lessAndReturn(0,-1,3)>::value != 3)
+ DUNE_THROW(Dune::Exception, "DUNE_ASSERT_AND_RETURN returned incorrect value in constexpr context");
+#endif
+
+#ifdef TEST_NDEBUG
+ // This should not fail because NDEBUG is set
+ if (Foo::lessAndReturn(0,-1,3) != 3)
+ DUNE_THROW(Dune::Exception, "DUNE_ASSERT_AND_RETURN returned incorrect value in dynamic context");
+
+ // This should not fail because NDEBUG is set
+ if (std::integral_constant<int, Foo::lessAndReturn(0,-1,3)>::value != 3)
+ DUNE_THROW(Dune::Exception, "DUNE_ASSERT_AND_RETURN returned incorrect value in constexpr context");
+#endif
+
+ return 0;
+}
+catch( Dune::Exception &e )
+{
+ std::cerr << "Dune reported error: " << e << std::endl;
+ return 1;
+}
+catch(...)
+{
+ std::cerr << "Unknown exception thrown!" << std::endl;
+ return 1;
+}
--- /dev/null
+#include "config.h"
+
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include <dune/common/typetraits.hh>
+#include <dune/common/typeutilities.hh>
+
+template<class T>
+constexpr auto doAutoCopy(T &&v)
+{
+ return Dune::autoCopy(std::forward<T>(v));
+}
+
+// an example expression object that evaluates to int(0)
+struct ZeroExpr {
+ constexpr operator int() const volatile { return 0; }
+};
+
+namespace Dune {
+ template<>
+ struct AutonomousValueType<ZeroExpr> { using type = int; };
+
+ // doAutoCopy should not pick up this overload
+ constexpr auto autoCopy(ZeroExpr) = delete;
+} // namespace Dune
+
+int main()
+{
+
+ {
+ std::vector<bool> v{true};
+ auto ref = v[0];
+ static_assert(!std::is_same<decltype(ref), bool>::value,
+ "sanity check failed");
+ auto val = Dune::autoCopy(v[0]);
+ static_assert(std::is_same<decltype(val), bool>::value,
+ "vector<bool>::reference not resolved");
+ }
+
+ {
+ constexpr ZeroExpr zexpr{};
+ auto val = doAutoCopy(zexpr);
+ static_assert(std::is_same<decltype(val), int>::value,
+ "Custom type was not resolved");
+
+ static_assert(doAutoCopy(zexpr) == 0,
+ "Resolution is not constexpr");
+ }
+
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <cstdint>
+#include <limits>
+#include <iostream>
+
+#include <dune/common/bigunsignedint.hh>
+#include <dune/common/hash.hh>
+
+#define CHECK(x) \
+ do { \
+ if (!(x)) { \
+ pass = false; \
+ std::cerr << "FAILED: " << #x << std::endl; \
+ } \
+ } while(false)
+
+int main()
+{
+ bool pass = true;
+
+ typedef Dune::bigunsignedint<16> ShortInteger;
+ typedef Dune::bigunsignedint<128> BigInteger;
+
+ /* Test std::numeric_limits for ShortInteger (should be same as for uint16_t) */
+ CHECK(std::numeric_limits<ShortInteger>::min() == std::numeric_limits<std::uint16_t>::min());
+ CHECK(std::numeric_limits<ShortInteger>::max() == std::numeric_limits<std::uint16_t>::max());
+ CHECK(std::numeric_limits<ShortInteger>::digits == std::numeric_limits<std::uint16_t>::digits);
+ CHECK(std::numeric_limits<ShortInteger>::epsilon() == std::numeric_limits<std::uint16_t>::epsilon());
+ CHECK(std::numeric_limits<ShortInteger>::round_error() == std::numeric_limits<std::uint16_t>::round_error());
+
+ CHECK(std::numeric_limits<ShortInteger>::is_exact);
+ CHECK(std::numeric_limits<ShortInteger>::is_integer);
+ CHECK(!std::numeric_limits<ShortInteger>::is_signed);
+
+ /* Test std::numeric_limits for BigInteger */
+ CHECK(std::numeric_limits<BigInteger>::min() == 0u);
+ CHECK(std::numeric_limits<BigInteger>::digits == 128);
+ CHECK(std::numeric_limits<BigInteger>::epsilon() == 0u);
+ CHECK(std::numeric_limits<BigInteger>::round_error() == 0u);
+
+ CHECK(std::numeric_limits<BigInteger>::is_exact);
+ CHECK(std::numeric_limits<BigInteger>::is_integer);
+ CHECK(!std::numeric_limits<BigInteger>::is_signed);
+
+ /* Test constructor */
+ CHECK(BigInteger(10u) == 10u);
+ CHECK(BigInteger(10) == BigInteger(10u));
+
+ try {
+ BigInteger tmp(-10);
+ pass = false;
+ std::cerr << "FAILED: BigInteger(-10) should throw an exception." << std::endl;
+ }
+ catch(const Dune::Exception&) {
+ /* Ignore */
+ }
+ catch(...) {
+ pass = false;
+ std::cerr << "FAILED: BigInteger(-10) threw an unexpected exception." << std::endl;
+ }
+
+ /* Test conversion */
+ CHECK(BigInteger(10u).touint() == 10u);
+ CHECK(BigInteger(10u).todouble() == 10.0);
+
+ /* Check BigInteger arithmetic */
+ CHECK(BigInteger(10u) + BigInteger(3u) == BigInteger(10u + 3u));
+ BigInteger tmp(10u); tmp += BigInteger(3u);
+ CHECK(tmp == BigInteger(10u + 3u));
+ CHECK(BigInteger(10u) - BigInteger(3u) == BigInteger(10u - 3u));
+ tmp = BigInteger(10u); tmp -= BigInteger(3u);
+ CHECK(tmp == BigInteger(10u - 3u));
+ CHECK(BigInteger(10u) * BigInteger(3u) == BigInteger(10u * 3u));
+ tmp = BigInteger(10u); tmp *= BigInteger(3u);
+ CHECK(tmp == BigInteger(10u * 3u));
+ CHECK(BigInteger(10u) / BigInteger(3u) == BigInteger(10u / 3u));
+ tmp = BigInteger(10u); tmp /= BigInteger(3u);
+ CHECK(tmp == BigInteger(10u / 3u));
+ CHECK(BigInteger(10u) % BigInteger(3u) == BigInteger(10u % 3u));
+ tmp = BigInteger(10u); tmp %= BigInteger(3u);
+ CHECK(tmp == BigInteger(10u % 3u));
+
+ CHECK(BigInteger(100000u) + BigInteger(30000u) == BigInteger(100000u + 30000u));
+ tmp = BigInteger(100000u); tmp += BigInteger(30000u);
+ CHECK(tmp == BigInteger(100000u + 30000u));
+ CHECK(BigInteger(100000u) - BigInteger(30000u) == BigInteger(100000u - 30000u));
+ tmp = BigInteger(100000u); tmp -= BigInteger(30000u);
+ CHECK(tmp == BigInteger(100000u - 30000u));
+ CHECK(BigInteger(70000u) - BigInteger(30000u) == BigInteger(70000u - 30000u));
+ tmp = BigInteger(70000u); tmp -= BigInteger(30000u);
+ CHECK(tmp == BigInteger(70000u - 30000u));
+ CHECK(BigInteger(100000u) * BigInteger(30000u) == BigInteger(100000u * 30000u));
+ tmp = BigInteger(100000u); tmp *= BigInteger(30000u);
+ CHECK(tmp == BigInteger(100000u * 30000u));
+ CHECK(BigInteger(100000u) / BigInteger(30000u) == BigInteger(100000u / 30000u));
+ tmp = BigInteger(100000u); tmp /= BigInteger(30000u);
+ CHECK(tmp == BigInteger(100000u / 30000u));
+ CHECK(BigInteger(100000u) % BigInteger(30000u) == BigInteger(100000u % 30000u));
+ tmp = BigInteger(100000u); tmp %= BigInteger(30000u);
+ CHECK(tmp == BigInteger(100000u % 30000u));
+
+ /* Test hashing */
+ {
+ Dune::hash<BigInteger> hasher;
+ CHECK(hasher(BigInteger(100)) == hasher(BigInteger(100)));
+ }
+ const BigInteger one{1};
+ const BigInteger zero{0};
+ CHECK((one & one) == one);
+ CHECK((one & zero) == zero);
+ CHECK((one | one) == one);
+ CHECK((one | zero) == one);
+ CHECK((one ^ one) == zero);
+ CHECK((one ^ zero) == one);
+
+ return pass ? 0 : 1;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <dune/common/bitsetvector.hh>
+
+#if defined(__GNUC__) && ! defined(__clang__)
+#include <ext/malloc_allocator.h>
+#endif
+
+#include <dune/common/test/iteratortest.hh>
+
+template<class BBF>
+struct ConstReferenceOp
+{
+ typedef typename BBF::value_type bitset;
+ typedef typename BBF::const_reference const_reference;
+
+ void operator()(const_reference t){
+ [[maybe_unused]] bitset x = t;
+ }
+};
+
+template <class T>
+void testConstBitSetMethods(const T t)
+{
+ t.size();
+ t[0];
+ t[t.size()-1];
+ t << 2;
+ t >> 2;
+ ~t;
+ t.count();
+ t.any();
+ t.none();
+ t.test(0);
+}
+
+template<class BBF>
+void testContainer(BBF & bbf)
+{
+ typedef typename BBF::value_type bitset;
+ typedef typename BBF::reference reference;
+ typedef typename BBF::const_reference const_reference;
+
+ const BBF & cbbf = bbf;
+
+ bitset x = bbf[3];
+ reference y = bbf[4];
+ const_reference z = bbf[4];
+ const reference v = bbf[4];
+
+  // assignment
+ y = false;
+ y[2] = true;
+ y = x;
+ y = z;
+ y = v;
+ x = y;
+ x = z;
+ x = v;
+ y = cbbf[1];
+ x = cbbf[1];
+ bbf[4] = x;
+ bbf[4] = v;
+ bbf[4] = y;
+ bbf[4] = true;
+
+ // invoke methods
+ testConstBitSetMethods(x);
+ testConstBitSetMethods(y);
+ testConstBitSetMethods(z);
+ testConstBitSetMethods(v);
+ testConstBitSetMethods(bbf[1]);
+ testConstBitSetMethods(cbbf[2]);
+
+ // equality
+ [[maybe_unused]] bool res;
+ res = (y == cbbf[2]);
+ res = (y == bbf[3]);
+ res = (y == x);
+ res = (x == y);
+ res = (x == z);
+ res = (z == x);
+ res = (z == y);
+ res = (y == z);
+
+ // inequality
+ res = (y != cbbf[2]);
+ res = (y != bbf[3]);
+ res = (y != x);
+ res = (x != y);
+ res = (x != z);
+ res = (z != x);
+ res = (z != y);
+ res = (y != z);
+
+ // &=
+ y &= cbbf[2];
+ y &= bbf[3];
+ y &= x;
+ x &= y;
+ x &= z;
+ y &= z;
+
+ // |=
+ y |= cbbf[2];
+ y |= bbf[3];
+ y |= x;
+ x |= y;
+ x |= z;
+ y |= z;
+
+ // ^=
+ y ^= cbbf[2];
+ y ^= bbf[3];
+ y ^= x;
+ x ^= y;
+ x ^= z;
+ y ^= z;
+
+ // shift operator
+ y <<= 1;
+ y >>= 1;
+
+ // flip
+ y.flip();
+ y.flip(2);
+ y[3].flip();
+}
+
+template<class BBF>
+void testConstContainer(const BBF& bbf){
+ typedef typename BBF::value_type bitset;
+ typedef typename BBF::iterator iterator;
+ typedef typename std::iterator_traits<iterator>::value_type value_type;
+ typedef typename BBF::const_reference reference;
+
+ const BBF & cbbf = bbf;
+
+ bitset x = bbf[3];
+ [[maybe_unused]] value_type z;
+ reference y = bbf[4];
+
+  // assignment
+ x = y;
+ x = cbbf[1];
+
+ // equality
+ [[maybe_unused]] bool res;
+ res = (y == cbbf[2]);
+ res = (y == bbf[3]);
+ res = (y == x);
+ res = (x == y);
+
+ // inequality
+ res = (y != cbbf[2]);
+ res = (y != bbf[3]);
+ res = (y != x);
+ res = (x != y);
+}
+
+template<int block_size, class Alloc>
+void doTest() {
+ typedef Dune::BitSetVector<block_size, Alloc> BBF;
+
+ BBF bbf(10,true);
+ const BBF & cbbf = bbf;
+
+ // test containers and some basic bitset operations
+ testContainer(bbf);
+ testConstContainer(bbf);
+ testConstContainer(cbbf);
+
+ // iterator interface
+#ifndef NDEBUG
+ ConstReferenceOp<BBF> cop;
+ assert(testIterator(bbf, cop) == 0);
+ assert(testIterator(cbbf, cop) == 0);
+#endif
+}
+
+int main()
+{
+ doTest<4, std::allocator<bool> >();
+#if defined(__GNUC__) && ! defined(__clang__)
+ doTest<4, __gnu_cxx::malloc_allocator<bool> >();
+#endif
+ return 0;
+}
--- /dev/null
+#include <dune/common/exceptions.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/dynmatrix.hh>
+
+int main() try {
+ bool passed = true;
+
+ // Free matrix-vector multiplication (mv): Input size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 2> x = {1, 2};
+ Dune::FieldVector<double, 2> b(0);
+ Dune::DenseMatrixHelp::multAssign(A,x,b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ // Free matrix-vector multiplication (mv): Output size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 3> x = {1, 2, 3};
+ Dune::FieldVector<double, 3> b(0);
+ Dune::DenseMatrixHelp::multAssign(A,x,b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Matrix-vector multiplication (mv): Input size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 2> x = {1, 2};
+ Dune::FieldVector<double, 2> b(0);
+ A.mv(x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ // Matrix-vector multiplication (mv): Output size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 3> x = {1, 2, 3};
+ Dune::FieldVector<double, 3> b(0);
+ A.mv(x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Matrix-vector multiplication (mtv): Input size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 3> x = {1, 2, 3};
+ Dune::FieldVector<double, 3> b(0);
+ A.mtv(x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ // Matrix-vector multiplication (mtv): Output size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 2> x = {1, 2};
+ Dune::FieldVector<double, 2> b(0);
+ A.mtv(x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Matrix-vector multiplication (umv): Input size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 2> x = {1, 2};
+ Dune::FieldVector<double, 2> b(0);
+ A.umv(x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ // Matrix-vector multiplication (umv): Output size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 3> x = {1, 2, 3};
+ Dune::FieldVector<double, 3> b(0);
+ A.umv(x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Matrix-vector multiplication (umtv): Input size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 3> x = {1, 2, 3};
+ Dune::FieldVector<double, 3> b(0);
+ A.umtv(x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ // Matrix-vector multiplication (umtv): Output size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 2> x = {1, 2};
+ Dune::FieldVector<double, 2> b(0);
+ A.umtv(x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Matrix-vector multiplication (umhv): Input size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 3> x = {1, 2, 3};
+ Dune::FieldVector<double, 3> b(0);
+ A.umhv(x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ // Matrix-vector multiplication (umhv): Output size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 2> x = {1, 2};
+ Dune::FieldVector<double, 2> b(0);
+ A.umhv(x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Matrix-vector multiplication (mmv): Input size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 2> x = {1, 2};
+ Dune::FieldVector<double, 2> b(0);
+ A.mmv(x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ // Matrix-vector multiplication (mmv): Output size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 3> x = {1, 2, 3};
+ Dune::FieldVector<double, 3> b(0);
+ A.mmv(x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Matrix-vector multiplication (mmtv): Input size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 3> x = {1, 2, 3};
+ Dune::FieldVector<double, 3> b(0);
+ A.mmtv(x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ // Matrix-vector multiplication (mmtv): Output size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 2> x = {1, 2};
+ Dune::FieldVector<double, 2> b(0);
+ A.mmtv(x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Matrix-vector multiplication (mmhv): Input size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 3> x = {1, 2, 3};
+ Dune::FieldVector<double, 3> b(0);
+ A.mmhv(x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ // Matrix-vector multiplication (mmhv): Output size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 2> x = {1, 2};
+ Dune::FieldVector<double, 2> b(0);
+ A.mmhv(x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Matrix-vector multiplication (usmv): Input size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 2> x = {1, 2};
+ Dune::FieldVector<double, 2> b(0);
+ A.usmv(2, x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ // Matrix-vector multiplication (usmv): Output size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 3> x = {1, 2, 3};
+ Dune::FieldVector<double, 3> b(0);
+ A.usmv(2, x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Matrix-vector multiplication (usmtv): Input size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 3> x = {1, 2, 3};
+ Dune::FieldVector<double, 3> b(0);
+ A.usmtv(2, x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ // Matrix-vector multiplication (usmtv): Output size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 2> x = {1, 2};
+ Dune::FieldVector<double, 2> b(0);
+ A.usmtv(2, x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Matrix-vector multiplication (usmhv): Input size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 3> x = {1, 2, 3};
+ Dune::FieldVector<double, 3> b(0);
+ A.usmhv(2, x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ // Matrix-vector multiplication (usmhv): Output size incorrect
+ try {
+ Dune::FieldMatrix<double, 2, 3> A = {{1, 2, 3}, {10, 20, 30}};
+ Dune::FieldVector<double, 2> x = {1, 2};
+ Dune::FieldVector<double, 2> b(0);
+ A.usmhv(2, x, b);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // right multiplication: Non-square matrix argument
+ try {
+ Dune::DynamicMatrix<double> A(2, 3, 5);
+ Dune::DynamicMatrix<double> const B(3, 2, 5);
+ A.rightmultiply(B);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ // right multiplication: Incorrect number of rows
+ try {
+ Dune::DynamicMatrix<double> A(2, 2, 5);
+ Dune::DynamicMatrix<double> const B(3, 3, 5);
+ A.rightmultiply(B);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // left multiplication: Non-square matrix argument
+ try {
+ Dune::FieldMatrix<double, 3, 2> A = {{1, 2}, {10, 20}, {100, 200}};
+ Dune::FieldMatrix<double, 2, 3> const B = {{1, 2, 3}, {10, 20, 30}};
+ A.leftmultiply(B);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ // left multiplication: Incorrect number of rows
+ try {
+ Dune::FieldMatrix<double, 3, 3> A = {
+ {1, 2, 3}, {10, 20, 30}, {100, 200, 300}};
+ Dune::FieldMatrix<double, 3, 2> const B = {{1, 2}, {10, 20}, {100, 200}};
+ A.leftmultiply(B);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ return passed ? 0 : 1;
+} catch (Dune::Exception &e) {
+ std::cerr << e << std::endl;
+ return 1;
+} catch (std::exception &e) {
+ std::cerr << e.what() << std::endl;
+ return 1;
+}
--- /dev/null
+#include <config.h>
+
+// Enable bounds checking so that size mismatches throw Dune::RangeError.
+#define DUNE_CHECK_BOUNDS
+
+#include <iostream>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/fmatrix.hh>
+
+int main() try {
+ bool passed = true;
+
+ // Add vectors of different sizes
+ try {
+ Dune::FieldVector<double, 3> v1 = {1, 2, 3};
+ Dune::FieldVector<double, 2> const v2 = {1, 2};
+ v1 += v2;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Subtract vectors of different sizes
+ try {
+ Dune::FieldVector<double, 3> v1 = {1, 2, 3};
+ Dune::FieldVector<double, 2> const v2 = {1, 2};
+ v1 -= v2;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Check vectors of different sizes for equality
+ try {
+ Dune::FieldVector<double, 3> const v1 = {1, 2, 3};
+ Dune::FieldVector<double, 2> const v2 = {1, 2};
+ [[maybe_unused]] bool res = (v1 == v2);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Check vectors of different sizes for inequality
+ try {
+ Dune::FieldVector<double, 3> const v1 = {1, 2, 3};
+ Dune::FieldVector<double, 2> const v2 = {1, 2};
+ [[maybe_unused]] bool res = (v1 != v2);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Apply axpy to vectors of different sizes
+ try {
+ Dune::FieldVector<double, 3> v1 = {1, 2, 3};
+ Dune::FieldVector<double, 2> const v2 = {1, 2};
+ v1.axpy(2, v2);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ try {
+ Dune::FieldMatrix<double, 1, 3> m1 = {{1, 2, 3}};
+ Dune::FieldMatrix<double, 2, 3> const m2 = {{1, 2, 3}, {10, 20, 30}};
+ m1 += m2;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ try {
+ Dune::FieldMatrix<double, 1, 3> m1 = {{1, 2, 3}};
+ Dune::FieldMatrix<double, 2, 3> const m2 = {{1, 2, 3}, {10, 20, 30}};
+ m1 -= m2;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ try {
+ Dune::FieldMatrix<double, 1, 3> m1 = {{1, 2, 3}};
+ Dune::FieldMatrix<double, 2, 3> const m2 = {{1, 2, 3}, {10, 20, 30}};
+ [[maybe_unused]] bool res = (m1 == m2);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ try {
+ Dune::FieldMatrix<double, 1, 3> m1 = {{1, 2, 3}};
+ Dune::FieldMatrix<double, 2, 3> const m2 = {{1, 2, 3}, {10, 20, 30}};
+ [[maybe_unused]] bool res = (m1 != m2);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ try {
+ Dune::FieldMatrix<double, 1, 3> m1 = {{1, 2, 3}};
+ Dune::FieldMatrix<double, 2, 3> const m2 = {{1, 2, 3}, {10, 20, 30}};
+ m1.axpy(2, m2);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ return passed ? 0 : 1;
+} catch (Dune::Exception &e) {
+ std::cerr << e << std::endl;
+ return 1;
+} catch (std::exception &e) {
+ std::cerr << e.what() << std::endl;
+ return 1;
+}
--- /dev/null
+#include <config.h>
+
+// Enable bounds checking so that out-of-range access throws Dune::RangeError.
+#define DUNE_CHECK_BOUNDS
+
+#include <iostream>
+
+#include <dune/common/bitsetvector.hh>
+#include <dune/common/diagonalmatrix.hh>
+#include <dune/common/dynvector.hh>
+#include <dune/common/dynmatrix.hh>
+#include <dune/common/exceptions.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/fmatrix.hh>
+
+int main() try {
+ bool passed = true;
+
+ // Write beyond end of singleton vector
+ try {
+ Dune::FieldVector<double, 1> v = {1};
+ v[1] = 10;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Read beyond end of singleton vector
+ try {
+ Dune::FieldVector<double, 1> const v = {1};
+ [[maybe_unused]] double const x = v[1];
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Write beyond end of vector
+ try {
+ Dune::FieldVector<double, 3> v = {1, 2, 3};
+ v[3] = 10;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ try {
+ Dune::DynamicVector<double> v = {1, 2, 3};
+ v[3] = 10;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Read beyond end of vector
+ try {
+ Dune::FieldVector<double, 3> const v = {1, 2, 3};
+ [[maybe_unused]] double const x = v[3];
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ try {
+ Dune::DynamicVector<double> const v = {1, 2, 3};
+ [[maybe_unused]] double const x = v[3];
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+
+ // Write beyond end of singleton matrix
+ try {
+ Dune::FieldMatrix<double, 1, 1> m = {{1}};
+ m[1][0] = 100;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Read beyond end of singleton matrix
+ try {
+ Dune::FieldMatrix<double, 1, 1> const m = {{1}};
+ [[maybe_unused]] double const x = m[1][0];
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Write beyond end of matrix
+ try {
+ Dune::FieldMatrix<double, 2, 3> m = {{1, 2, 3}, {10, 20, 30}};
+ m[2][0] = 100;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ try {
+ Dune::DynamicMatrix<double> m = {{1, 2, 3}, {10, 20, 30}};
+ m[2][0] = 100;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Read beyond end of matrix
+ try {
+ Dune::FieldMatrix<double, 2, 3> const m = {{1, 2, 3}, {10, 20, 30}};
+ [[maybe_unused]] double const x = m[2][0];
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ try {
+ Dune::DynamicMatrix<double> const m = {{1, 2, 3}, {10, 20, 30}};
+ [[maybe_unused]] double const x = m[2][0];
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Write beyond end of diagonal matrix (way #1)
+ try {
+ Dune::DiagonalMatrix<double, 3> d(5);
+ d[3][3] = 9;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ // Write beyond end of diagonal matrix (way #2)
+ try {
+ Dune::DiagonalMatrix<double, 3> d(5);
+ d.diagonal(3) = 9;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Read beyond end of diagonal matrix (way #1)
+ try {
+ Dune::DiagonalMatrix<double, 3> const d(5);
+ [[maybe_unused]] double const x = d[3][3];
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ // Read beyond end of diagonal matrix (way #2)
+ try {
+ Dune::DiagonalMatrix<double, 3> const d(5);
+ [[maybe_unused]] double const x = d.diagonal(3);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Write outside of diagonal matrix pattern
+ try {
+ Dune::DiagonalMatrix<double, 3> d(5);
+ d[1][2] = 9;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Read outside of diagonal matrix pattern
+ try {
+ Dune::DiagonalMatrix<double, 3> const d(5);
+ [[maybe_unused]] double const x = d[1][2];
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Check for entry beyond diagonal matrix size
+ try {
+ Dune::DiagonalMatrix<double, 3> const d(5);
+ d.exists(3, 3);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Check for entry beyond matrix size
+ try {
+ Dune::FieldMatrix<double, 2, 3> const m = {{1, 2, 3}, {10, 20, 30}};
+ m.exists(2, 2);
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Read beyond end of bitsetvector
+ try {
+ Dune::BitSetVector<3> const b(10);
+ [[maybe_unused]] auto const x = b[10];
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Write beyond end of bitsetvector
+ try {
+ Dune::BitSetVector<3> b(10);
+ b[10] = true;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Read beyond end of bitsetvectorreference
+ try {
+ Dune::BitSetVector<3> const b(10);
+ [[maybe_unused]] auto const x = b[10][3];
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ // Write beyond end of bitsetvectorreference
+ try {
+ Dune::BitSetVector<3> b(10);
+ b[10][3] = true;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ return passed ? 0 : 1;
+} catch (Dune::Exception &e) {
+ std::cerr << e << std::endl;
+ return 1;
+} catch (std::exception &e) {
+ std::cerr << e.what() << std::endl;
+ return 1;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <dune/common/stdthread.hh>
+
+int main() {
+ DUNE_ASSERT_CALL_ONCE();
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include <config.h>
+#include <dune/common/dynvector.hh>
+
+int main(int, char **)
+{
+
+ Dune::DynamicVector<double> one(1);
+ Dune::DynamicVector<double> two(2);
+
+ two = one;
+
+ return 0;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include <config.h>
+#include <dune/common/fvector.hh>
+
+int main(int argc, char * argv[])
+{
+
+ Dune::FieldVector<double,DIM> one(1);
+ Dune::FieldVector<float,2> two(2);
+
+ one=two;
+
+ return 0;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_CHECK_MATRIX_INTERFACE_HH
+#define DUNE_COMMON_CHECK_MATRIX_INTERFACE_HH
+
+#include <algorithm>
+#include <limits>
+
+#include <dune/common/dynvector.hh>
+#include <dune/common/exceptions.hh>
+#include <dune/common/ftraits.hh>
+#include <dune/common/fvector.hh>
+
+
+/**
+ * @file
+ * @brief This file provides an interface check for dense matrices.
+ * @author Christoph Gersbacher
+ */
+
+
+namespace Dune
+{
+
+ // External forward declarations for namespace Dune
+ // ------------------------------------------------
+
+ template< class, int, int > class FieldMatrix;
+ template< class, int > class DiagonalMatrix;
+
+} // namespace Dune
+
+
+
+namespace CheckMatrixInterface
+{
+
+ namespace Capabilities
+ {
+
+ // hasStaticSizes
+ // --------------
+
+ template< class Matrix >
+ struct hasStaticSizes
+ {
+ static const bool v = false;
+ static const int rows = ~0;
+ static const int cols = ~0;
+ };
+
+ template< class Matrix >
+ struct hasStaticSizes< const Matrix >
+ {
+ static const bool v = hasStaticSizes< Matrix >::v;
+ static const int rows = hasStaticSizes< Matrix >::rows;
+ static const int cols = hasStaticSizes< Matrix >::cols;
+ };
+
+
+
+ // isSquare
+ // --------
+
+ template< class Matrix >
+ struct isSquare
+ {
+ static const bool v = false;
+ };
+
+ template< class Matrix >
+ struct isSquare< const Matrix >
+ {
+ static const bool v = isSquare< Matrix >::v;
+ };
+
+
+
+ // Template specializations for Dune::FieldMatrix
+ // ----------------------------------------------
+
+ template< class K, int r, int c >
+ struct hasStaticSizes< Dune::FieldMatrix< K, r, c > >
+ {
+ static const bool v = true;
+ static const int rows = r;
+ static const int cols = c;
+ };
+
+ template< class K, int rows, int cols >
+ struct isSquare< Dune::FieldMatrix< K, rows, cols > >
+ {
+ static const bool v = ( rows == cols );
+ };
+
+
+
+ // Template specializations for Dune::DiagonalMatrix
+ // -------------------------------------------------
+
+ template< class K, int n >
+ struct hasStaticSizes< Dune::DiagonalMatrix<K,n> >
+ {
+ static const bool v = true;
+ static const int rows = n;
+ static const int cols = n;
+ };
+
+ template< class K, int n >
+ struct isSquare< Dune::DiagonalMatrix<K,n> >
+ {
+ static const bool v = true;
+ };
+
+ } // namespace Capabilities
+
+
+
+ // UseDynamicVector
+ // ----------------
+
+ template< class Matrix >
+ struct UseDynamicVector
+ {
+ typedef typename Matrix::value_type value_type;
+
+ typedef Dune::DynamicVector< value_type > domain_type;
+ typedef domain_type range_type;
+
+ static domain_type domain ( const Matrix &matrix, value_type v = value_type() )
+ {
+ return domain_type( matrix.M(), v );
+ }
+
+ static range_type range ( const Matrix &matrix, value_type v = value_type() )
+ {
+ return range_type( matrix.N(), v );
+ }
+ };
+
+
+
+ // UseFieldVector
+ // --------------
+
+ template< class K, int rows, int cols >
+ struct UseFieldVector
+ {
+ typedef K value_type;
+
+ typedef Dune::FieldVector< K, cols > domain_type;
+ typedef Dune::FieldVector< K, rows > range_type;
+
+ template< class Matrix >
+ static domain_type domain ( const Matrix &, value_type v = value_type() )
+ {
+ return domain_type( v );
+ }
+
+ template< class Matrix >
+ static range_type range ( const Matrix &, value_type v = value_type() )
+ {
+ return range_type( v );
+ }
+ };
+
+
+
+ // MatrixSizeHelper
+ // ----------------
+
+ template< class Matrix, bool hasStaticSizes = Capabilities::hasStaticSizes< Matrix >::v >
+ struct MatrixSizeHelper;
+
+ template< class Matrix >
+ struct MatrixSizeHelper< Matrix, false >
+ {
+ typedef typename Matrix::size_type size_type;
+ static const size_type rows ( const Matrix &matrix ) { return matrix.rows(); }
+ static const size_type cols ( const Matrix &matrix ) { return matrix.cols(); }
+ };
+
+ template< class Matrix >
+ struct MatrixSizeHelper< Matrix, true >
+ {
+ typedef typename Matrix::size_type size_type;
+ static const size_type rows ( const Matrix & ) { return Matrix::rows; }
+ static const size_type cols ( const Matrix & ) { return Matrix::cols; }
+ };
+
+
+
+ // CheckIfSquareMatrix
+ // -------------------
+
+ template< class Matrix, class Traits, bool isSquare = Capabilities::isSquare< Matrix >::v >
+ struct CheckIfSquareMatrix;
+
+ template< class Matrix, class Traits >
+ struct CheckIfSquareMatrix< Matrix, Traits, false >
+ {
+ static void apply ( const Matrix &) {}
+
+ static void apply ( Matrix &) {}
+ };
+
+ template< class Matrix, class Traits >
+ struct CheckIfSquareMatrix< Matrix, Traits, true >
+ {
+ typedef typename Matrix::value_type value_type;
+
+ static value_type tolerance ()
+ {
+ return value_type( 16 ) * std::numeric_limits< value_type >::epsilon();
+ }
+
+ static void apply ( const Matrix &matrix )
+ {
+ const value_type determinant = matrix.determinant();
+ if( determinant > tolerance() )
+ {
+ typename Traits::domain_type x = Traits::domain( matrix );
+ const typename Traits::range_type b = Traits::range( matrix );
+ matrix.solve( x, b );
+ }
+ }
+
+ static void apply ( Matrix &matrix )
+ {
+ apply( const_cast< const Matrix & >( matrix ) );
+ if( matrix.determinant() > tolerance() )
+ matrix.invert();
+ }
+ };
+
+
+
+ // CheckConstMatrix
+ // ----------------
+
+ template< class Matrix, class Traits >
+ struct CheckConstMatrix
+ {
+ // required type definitions
+ typedef typename Matrix::size_type size_type;
+
+ typedef typename Matrix::value_type value_type;
+ typedef typename Matrix::field_type field_type;
+ typedef typename Matrix::block_type block_type;
+
+ typedef typename Matrix::const_row_reference const_row_reference;
+
+ typedef typename Matrix::ConstIterator ConstIterator;
+
+ static void apply ( const Matrix &matrix )
+ {
+ checkDataAccess ( matrix );
+ checkIterators ( matrix );
+ checkLinearAlgebra ( matrix );
+ checkNorms ( matrix );
+ checkSizes ( matrix );
+ CheckIfSquareMatrix< Matrix, Traits >::apply( matrix );
+
+ // TODO: check comparison
+ // bool operator == ( const Matrix &other );
+ // bool operator != ( const Matrix &other );
+ }
+
+ // check size methods
+ static void checkSizes ( const Matrix &matrix )
+ {
+ [[maybe_unused]] const size_type size = matrix.size();
+ const size_type rows = MatrixSizeHelper< Matrix >::rows( matrix );
+ const size_type cols = MatrixSizeHelper< Matrix >::cols( matrix );
+ const size_type N = matrix.N();
+ const size_type M = matrix.M();
+
+ if( N != rows || M != cols )
+ DUNE_THROW( Dune::RangeError, "Returned inconsistent sizes." );
+ }
+
+ // check read-only access to data
+ static void checkDataAccess ( const Matrix &matrix )
+ {
+ const size_type size = matrix.size();
+ for( size_type i = size_type( 0 ); i < size; ++i )
+ [[maybe_unused]] const_row_reference row = matrix[ i ];
+
+ const size_type rows = MatrixSizeHelper< Matrix >::rows( matrix );
+ const size_type cols = MatrixSizeHelper< Matrix >::cols( matrix );
+ for( size_type i = size_type( 0 ); i < rows; ++i )
+ {
+ for( size_type j = size_type( 0 ); j < cols; ++j )
+ {
+ [[maybe_unused]] bool exists = matrix.exists( i, j );
+ [[maybe_unused]] const value_type &value = matrix[ i ][ j ];
+ }
+ }
+ }
+
+ // check norms
+ static void checkNorms ( const Matrix &matrix )
+ {
+ typedef typename Dune::FieldTraits< value_type >::real_type real_type;
+ real_type frobenius_norm = matrix.frobenius_norm();
+ real_type frobenius_norm2 = matrix.frobenius_norm2();
+ real_type infinity_norm = matrix.infinity_norm() ;
+ real_type infinity_norm_real = matrix.infinity_norm_real();
+
+ if( std::min( std::min( frobenius_norm, frobenius_norm2 ),
+ std::min( infinity_norm, infinity_norm_real ) ) < real_type( 0 ) )
+ DUNE_THROW( Dune::InvalidStateException, "Norms must return non-negative values." );
+ }
+
+ // check basic linear algebra methods
+ static void checkLinearAlgebra ( const Matrix &matrix )
+ {
+ typename Traits::domain_type domain = Traits::domain( matrix );
+ typename Traits::range_type range = Traits::range( matrix );
+ typename Traits::value_type alpha( 1 );
+
+ matrix.mv( domain, range );
+ matrix.mtv( range, domain );
+ matrix.umv( domain, range );
+ matrix.umtv( range, domain );
+ matrix.umhv( range, domain );
+ matrix.mmv( domain, range );
+ matrix.mmtv( range, domain );
+ matrix.mmhv( range, domain );
+ matrix.usmv( alpha, domain, range );
+ matrix.usmtv( alpha, range, domain );
+ matrix.usmhv( alpha, range, domain );
+ }
+
+ // check iterator methods
+ static void checkIterators ( const Matrix &matrix )
+ {
+ const ConstIterator end = matrix.end();
+ for( ConstIterator it = matrix.begin(); it != end; ++it )
+ [[maybe_unused]] const_row_reference row = *it;
+ }
+ };
+
+
+
+ // CheckNonConstMatrix
+ // -------------------
+
+ template< class Matrix, class Traits >
+ struct CheckNonConstMatrix
+ {
+ // required type definitions
+ typedef typename Matrix::size_type size_type;
+ typedef typename Matrix::value_type value_type;
+ typedef typename Matrix::row_reference row_reference;
+ typedef typename Matrix::row_type row_type;
+ typedef typename Matrix::Iterator Iterator;
+
+ static void apply ( Matrix &matrix )
+ {
+ checkIterators( matrix );
+ checkAssignment( matrix );
+
+ CheckIfSquareMatrix< Matrix, Traits >::apply( matrix );
+
+ // TODO: check scalar/matrix and matrix/matrix operations
+ // Matrix &operator+= ( const Matrix &other );
+ // Matrix &operator-= ( const Matrix &other );
+ // Matrix &operator*= ( const value_type &v );
+ // Matrix &operator/= ( const value_type &v );
+ // Matrix &axpy ( const value_type &v, const Matrix &other );
+ // Matrix &leftmultiply ( const Matrix &other );
+ // Matrix &rightmultiply ( const Matrix &other );
+ }
+
+ // check assignment
+ static void checkAssignment ( Matrix &matrix )
+ {
+ matrix = value_type( 1 );
+
+ const size_type size = matrix.size();
+ for( size_type i = size_type( 0 ); i < size; ++i )
+ {
+ row_reference row = matrix[ i ];
+ row = row_type( value_type( 0 ) );
+ }
+
+ const size_type rows = MatrixSizeHelper< Matrix >::rows( matrix );
+ const size_type cols = MatrixSizeHelper< Matrix >::cols( matrix );
+ for( size_type i = size_type( 0 ); i < rows; ++i )
+ {
+ for( size_type j = size_type( 0 ); j < cols; ++j )
+ matrix[ i ][ j ] = ( i == j ? value_type( 1 ) : value_type( 0 ) );
+ }
+ }
+
+ // check iterator methods
+ static void checkIterators ( Matrix &matrix )
+ {
+ const Iterator end = matrix.end();
+ for( Iterator it = matrix.begin(); it != end; ++it )
+ {
+ row_reference row = *it;
+ row = row_type( value_type( 0 ) );
+ }
+ }
+ };
+
+} // namespace CheckMatrixInterface
+
+
+
+namespace Dune
+{
+
+ // checkMatrixInterface
+ // --------------------
+
+ template< class Matrix, class Traits = CheckMatrixInterface::UseDynamicVector< Matrix > >
+ void checkMatrixInterface ( const Matrix &matrix )
+ {
+ CheckMatrixInterface::CheckConstMatrix< Matrix, Traits >::apply( matrix );
+ }
+
+ template< class Matrix, class Traits = CheckMatrixInterface::UseDynamicVector< Matrix > >
+ void checkMatrixInterface ( Matrix &matrix )
+ {
+ checkMatrixInterface( const_cast< const Matrix & >( matrix ) );
+ CheckMatrixInterface::CheckNonConstMatrix< Matrix, Traits >::apply( matrix );
+ }
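+
+ // A minimal usage sketch (illustrative only; the concrete matrix and the
+ // explicit UseFieldVector traits below are assumptions, not part of this
+ // header):
+ //
+ // Dune::FieldMatrix< double, 2, 2 > A = { { 1, 0 }, { 0, 1 } };
+ // Dune::checkMatrixInterface( A ); // default traits: UseDynamicVector
+ // Dune::checkMatrixInterface< Dune::FieldMatrix< double, 2, 2 >,
+ // CheckMatrixInterface::UseFieldVector< double, 2, 2 > >( A );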
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_COMMON_CHECK_MATRIX_INTERFACE_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include "config.h"
+
+#include <bitset>
+#include <complex>
+#include <iostream>
+#include <regex>
+#include <string>
+
+#include <dune/common/classname.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/test/testsuite.hh>
+
+
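+// Bitmask describing which cv-qualifiers and reference kinds the checked
+// class name is expected to carry.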
+using CVRef = std::bitset<4>;
+constexpr CVRef is_const = 1;
+constexpr CVRef is_volatile = 2;
+constexpr CVRef is_lvalue_reference = 4;
+constexpr CVRef is_rvalue_reference = 8;
+constexpr CVRef is_reference = 12;
+
+void checkname(Dune::TestSuite &t, const std::string &name, CVRef cvref,
+ const std::string &pattern)
+{
+ const auto npos = std::string::npos;
+
+ std::cout << name << std::endl;
+
+ t.check(std::regex_search(name, std::regex{pattern}))
+ << '`' << name << "` does not look like `" << pattern << '`';
+
+ static const std::regex const_pattern{ R"(\bconst\b)" };
+ bool found_const = std::regex_search(name, const_pattern);
+ if((cvref & is_const) == is_const)
+ t.check(found_const) << '`' << name << "` does not contain `const`";
+ else
+ t.check(!found_const) << '`' << name << "` contains `const`";
+
+ static const std::regex volatile_pattern{ R"(\bvolatile\b)" };
+ bool found_volatile = std::regex_search(name, volatile_pattern);
+ if((cvref & is_volatile) == is_volatile)
+ t.check(found_volatile) << '`' << name << "` does not contain `volatile`";
+ else
+ t.check(!found_volatile) << '`' << name << "` contains `volatile`";
+
+ bool found_reference = name.find('&') != npos;
+ bool found_rvalue_reference = name.find("&&") != npos;
+ if((cvref & is_reference) == is_reference)
+ t.check(found_reference)
+ << '`' << name << "` does not contain `&` or `&&`";
+ else if((cvref & is_lvalue_reference) == is_lvalue_reference)
+ t.check(found_reference && !found_rvalue_reference)
+ << '`' << name << "` contains `&&` or does not contain `&`";
+ else if((cvref & is_rvalue_reference) == is_rvalue_reference)
+ t.check(found_rvalue_reference)
+ << '`' << name << "` does not contain `&&`";
+ else
+ t.check(!found_reference)
+ << '`' << name << "` contains `&` or `&&`";
+}
+
+struct Base {
+ virtual ~Base() = default;
+};
+
+struct Derived : Base {};
+
+int main()
+{
+ Dune::TestSuite t("className()");
+
+ std::cout << "First three simple class names extracted from variables:"
+ << std::endl;
+ Dune::FieldVector<int, 3> xi;
+ checkname(t, Dune::className(xi), {},
+ R"(\bFieldVector\s*<\s*int\s*,\s*3\s*>)");
+ Dune::FieldVector<double, 1> xd;
+ checkname(t, Dune::className(xd), {},
+ R"(\bFieldVector\s*<\s*double\s*,\s*1\s*>)");
+ Dune::FieldVector<std::complex<double>, 10> xcd;
+ checkname(t, Dune::className(xcd), {},
+ R"(\bFieldVector\s*<.*\bcomplex\s*<\s*double\s*>\s*,\s*10\s*>)");
+ std::cout << std::endl;
+
+ std::cout << "Adding const:" << std::endl;
+ const Dune::FieldVector<int, 3> cxi;
+ checkname(t, Dune::className(cxi), is_const,
+ R"(\bFieldVector\s*<\s*int\s*,\s*3\s*>)");
+ std::cout << std::endl;
+
+ std::cout << "If a variable is a reference that can not be extracted (needs "
+ << "decltype as used below): " << std::endl;
+ Dune::FieldVector<double, 1> &rxd = xd;
+ checkname(t, Dune::className(rxd), {},
+ R"(\bFieldVector\s*<\s*double\s*,\s*1\s*>)");
+ std::cout << std::endl;
+
+ std::cout << "Extracting the class name using a type directly - "
+ << "also extractes references correctly: " << std::endl;
+ checkname(t, Dune::className<decltype(rxd)>(), is_lvalue_reference,
+ R"(\bFieldVector\s*<\s*double\s*,\s*1\s*>)");
+ const Dune::FieldVector<double, 1> &rcxd = xd;
+ checkname(t, Dune::className<decltype(rcxd)>(), is_const|is_lvalue_reference,
+ R"(\bFieldVector\s*<\s*double\s*,\s*1\s*>)");
+ const Dune::FieldVector<int, 3> &rcxi = cxi;
+ checkname(t, Dune::className<decltype(rcxi)>(), is_const|is_lvalue_reference,
+ R"(\bFieldVector\s*<\s*int\s*,\s*3\s*>)");
+ std::cout << std::endl;
+
+ std::cout << "Test some further types:" << std::endl;
+ using RVXCD = volatile Dune::FieldVector<std::complex<double>, 10>&;
+ checkname(t, Dune::className<RVXCD>(), is_volatile|is_lvalue_reference,
+ R"(\bFieldVector\s*<.*\bcomplex\s*<\s*double\s*>\s*,\s*10\s*>)");
+ using RRXCD = Dune::FieldVector<std::complex<double>, 10>&&;
+ checkname(t, Dune::className<RRXCD>(), is_rvalue_reference,
+ R"(\bFieldVector\s*<.*\bcomplex\s*<\s*double\s*>\s*,\s*10\s*>)");
+ std::cout << std::endl;
+
+ std::cout << "Test printing dynamic vs. static types:" << std::endl;
+ Derived d{};
+ Base &b = d;
+ checkname(t, Dune::className(b), {}, R"(\bDerived\b)");
+ checkname(t, Dune::className<decltype(b)>(), is_lvalue_reference,
+ R"(\bBase\b)");
+ t.check(Dune::className<Derived>() == Dune::className(b))
+ << "dynamic type of base reference should match derived type";
+ std::cout << std::endl;
+
+ std::cout << "Test rvalue argument to className(expr):" << std::endl;
+ checkname(t, Dune::className(Base{}), {}, R"(\bBase\b)");
+ std::cout << std::endl;
+
+ #if !HAVE_CXA_DEMANGLE
+ // in this case we only make sure that no segfault or similar happens
+ return 0;
+ #else
+ return t.exit();
+ #endif
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_TEST_COLLECTORSTREAM_HH
+#define DUNE_COMMON_TEST_COLLECTORSTREAM_HH
+
+#include <sstream>
+#include <string>
+#include <functional>
+
+
+#include <dune/common/typeutilities.hh>
+
+
+namespace Dune {
+
+
+
+/**
+ * \brief Data collector stream
+ *
+ * A class derived from std::ostringstream that allows collecting
+ * data via a temporary returned object. To facilitate this it
+ * stores a callback that is used to pass the collected data to
+ * its creator on destruction.
+ *
+ * In order to avoid passing the same data twice, copy construction
+ * is forbidden and only move construction is allowed.
+ */
+class CollectorStream : public std::ostringstream
+{
+public:
+
+ /**
+ * \brief Create from callback
+ *
+ * \tparam CallBack Type of callback. Must be convertible to std::function<void(std::string)>
+ * \param callBack A copy of this function will be stored and called on destruction.
+ */
+ template<class CallBack,
+ Dune::disableCopyMove<CollectorStream, CallBack> = 0>
+ CollectorStream(CallBack&& callBack) :
+ callBack_(callBack)
+ {}
+
+ CollectorStream(const CollectorStream& other) = delete;
+
+ /**
+ * \brief Move constructor
+ *
+ * This will take over the data and callback from the
+ * moved-from CollectorStream and disable the callback
+ * in the latter.
+ */
+ CollectorStream(CollectorStream&& other) :
+ callBack_(other.callBack_)
+ {
+ (*this) << other.str();
+ other.callBack_ = [](std::string){};
+ }
+
+ /**
+ * \brief Destructor
+ *
+ * This calls the callback function given on creation
+ * passing all collected data as a single string argument.
+ */
+ ~CollectorStream()
+ {
+ callBack_(this->str());
+ }
+
+private:
+ std::function<void(std::string)> callBack_;
+};
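+
+// A minimal usage sketch (illustrative only; `collected` is an assumed local
+// variable, not part of this header). Everything streamed into the temporary
+// is handed to the callback when the CollectorStream is destroyed:
+//
+// std::string collected;
+// {
+// Dune::CollectorStream stream([&](std::string data) { collected = data; });
+// stream << "answer: " << 42;
+// } // destructor runs here; collected == "answer: 42"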
+
+
+
+} // namespace Dune
+
+
+#endif // DUNE_COMMON_TEST_COLLECTORSTREAM_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include <config.h>
+
+#include <iostream>
+
+#include <dune/common/parallel/mpihelper.hh>
+#include <dune/common/typelist.hh>
+
+#include <dune/common/concept.hh>
+#include <dune/common/test/testsuite.hh>
+
+
+
+struct HasFoo
+{
+ template<class T>
+ auto require(const T& t) -> decltype(
+ t.foo()
+ );
+};
+
+struct HasBar
+{
+ template<class T>
+ auto require(const T& t) -> decltype(
+ t.bar()
+ );
+};
+
+struct HasFooAndBar1 : Dune::Concept::Refines<HasFoo>
+{
+ template<class T>
+ auto require(const T& t) -> decltype(
+ t.bar()
+ );
+};
+
+struct HasFooAndBar2 : Dune::Concept::Refines<HasBar>
+{
+ template<class T>
+ auto require(const T& t) -> decltype(
+ t.foo()
+ );
+};
+
+struct HasFooAndBar3
+{
+ template<class T>
+ auto require(const T& t) -> decltype(
+ t.foo(),
+ t.bar()
+ );
+};
+
+struct HasFooAndBar4 : Dune::Concept::Refines<HasFoo, HasBar>
+{
+ template<class T>
+ auto require(const T& t) -> decltype(
+ 0
+ );
+};
+
+struct HasFooAndBar5
+{
+ template<class T>
+ auto require(const T& t) -> decltype(
+ 0
+ );
+ using BaseConceptList = Dune::TypeList<HasFoo, HasBar>;
+};
+
+
+
+
+template<class T>
+struct Foo
+{
+ T foo() const
+ { return T(); }
+};
+
+template<class T>
+struct Bar
+{
+ T bar() const
+ { return T(); }
+};
+
+template<class T>
+struct FooBar
+{
+ T foo() const
+ { return T(); }
+
+ T bar() const
+ { return T(); }
+};
+
+
+
+
+int main ( int argc, char **argv )
+try
+{
+ using namespace Dune;
+
+ MPIHelper::instance(argc, argv);
+
+ TestSuite test;
+
+ test.check(models<HasFoo, Foo<int>>())
+ << "models<HasFoo, Foo<int>>() gives wrong result";
+
+ test.check(not models<HasFoo, Bar<int>>())
+ << "models<HasFoo, Bar<int>>() gives wrong result";
+
+ test.check(models<HasFoo, FooBar<int>>())
+ << "models<HasFoo, FooBar<int>>() gives wrong result";
+
+
+
+ test.check(not models<HasBar, Foo<int>>())
+ << "models<HasBar, Foo<int>>() gives wrong result";
+
+ test.check(models<HasBar, Bar<int>>())
+ << "models<HasBar, Bar<int>>() gives wrong result";
+
+ test.check(models<HasBar, FooBar<int>>())
+ << "models<HasBar, FooBar<int>>() gives wrong result";
+
+
+
+ test.check(not models<HasFooAndBar1, Foo<int>>())
+ << "models<HasFooAndBar1, Foo<int>>() gives wrong result";
+
+ test.check(not models<HasFooAndBar1, Bar<int>>())
+ << "models<HasFooAndBar1, Bar<int>>() gives wrong result";
+
+ test.check(models<HasFooAndBar1, FooBar<int>>())
+ << "models<HasFooAndBar1, FooBar<int>>() gives wrong result";
+
+
+
+ test.check(not models<HasFooAndBar2, Foo<int>>())
+ << "models<HasFooAndBar2, Foo<int>>() gives wrong result";
+
+ test.check(not models<HasFooAndBar2, Bar<int>>())
+ << "models<HasFooAndBar2, Bar<int>>() gives wrong result";
+
+ test.check(models<HasFooAndBar2, FooBar<int>>())
+ << "models<HasFooAndBar2, FooBar<int>>() gives wrong result";
+
+
+
+ test.check(not models<HasFooAndBar3, Foo<int>>())
+ << "models<HasFooAndBar3, Foo<int>>() gives wrong result";
+
+ test.check(not models<HasFooAndBar3, Bar<int>>())
+ << "models<HasFooAndBar3, Bar<int>>() gives wrong result";
+
+ test.check(models<HasFooAndBar3, FooBar<int>>())
+ << "models<HasFooAndBar3, FooBar<int>>() gives wrong result";
+
+
+
+ test.check(not models<HasFooAndBar4, Foo<int>>())
+ << "models<HasFooAndBar4, Foo<int>>() gives wrong result";
+
+ test.check(not models<HasFooAndBar4, Bar<int>>())
+ << "models<HasFooAndBar4, Bar<int>>() gives wrong result";
+
+ test.check(models<HasFooAndBar4, FooBar<int>>())
+ << "models<HasFooAndBar4, FooBar<int>>() gives wrong result";
+
+
+
+ test.check(not models<HasFooAndBar5, Foo<int>>())
+ << "models<HasFooAndBar5, Foo<int>>() gives wrong result";
+
+ test.check(not models<HasFooAndBar5, Bar<int>>())
+ << "models<HasFooAndBar5, Bar<int>>() gives wrong result";
+
+ test.check(models<HasFooAndBar5, FooBar<int>>())
+ << "models<HasFooAndBar5, FooBar<int>>() gives wrong result";
+
+
+
+ return test.exit();
+}
+catch( Dune::Exception &e )
+{
+ std::cerr << "Dune reported error: " << e << std::endl;
+ return 1;
+}
+catch(...)
+{
+ std::cerr << "Unknown exception thrown!" << std::endl;
+ return 1;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#include "config.h"
+
+#include <type_traits>
+
+#include <dune/common/hybridutilities.hh>
+
+int main()
+{
+ // check that the id argument is a constexpr functor
+ Dune::Hybrid::ifElse(std::true_type{}, [](auto id) {
+ static_assert(id(true),
+ "id() argument of ifElse() branches should be a constexpr functor");
+ });
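+
+ // Illustrative only (not part of this test): ifElse also accepts an else
+ // branch; with a compile-time condition only the selected branch is
+ // invoked, e.g.
+ //
+ // Dune::Hybrid::ifElse(std::false_type{},
+ // [](auto id) { /* not invoked */ },
+ // [](auto id) { /* invoked */ });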
+}
--- /dev/null
+// @GENERATED_SOURCE@
+
+#include <config.h>
+
+#include <dune/common/debugalign.hh>
+#include <dune/common/simd/test.hh>
+#include <dune/common/test/debugalignsimdtest.hh>
+
+namespace Dune {
+ namespace Simd {
+
+ template void UnitTest::check@POINT@<AlignedNumber<@SCALAR@> >();
+
+ } //namespace Simd
+} // namespace Dune
--- /dev/null
+// @GENERATED_SOURCE@
+
+#include <config.h>
+
+#include <cstdlib>
+#include <type_traits>
+
+#include <dune/common/typetraits.hh>
+#include <dune/common/debugalign.hh>
+#include <dune/common/parallel/mpihelper.hh>
+#include <dune/common/simd/test.hh>
+#include <dune/common/test/debugalignsimdtest.hh>
+
+template<class> struct RebindAccept : std::false_type {};
+#cmake @template@
+template<>
+struct RebindAccept<Dune::AlignedNumber<@SCALAR@> > : std::true_type {};
+#cmake @endtemplate@
+
+int main(int argc, char **argv)
+{
+ Dune::MPIHelper::instance(argc, argv);
+
+ Dune::Simd::UnitTest test;
+
+ using Rebinds = Dune::Simd::RebindList<
+#cmake @template@
+ @SCALAR@,
+#cmake @endtemplate@
+ Dune::Simd::EndMark>;
+
+#cmake @template@
+ test.check<Dune::AlignedNumber<@SCALAR@>,
+ Rebinds, Dune::AlwaysFalse, RebindAccept>();
+#cmake @endtemplate@
+
+ return test.good() ? EXIT_SUCCESS : EXIT_FAILURE;
+}
--- /dev/null
+// @GENERATED_SOURCE@
+
+#ifndef DUNE_COMMON_TEST_DEBUGALIGNSIMDTEST_HH
+#define DUNE_COMMON_TEST_DEBUGALIGNSIMDTEST_HH
+
+#include <dune/common/debugalign.hh>
+#include <dune/common/simd/test.hh>
+
+namespace Dune {
+ namespace Simd {
+
+#cmake @template POINT@
+ extern template void UnitTest::check@POINT@<AlignedNumber<@SCALAR@> >();
+#cmake @endtemplate@
+
+ } //namespace Simd
+} // namespace Dune
+
+#endif // DUNE_COMMON_TEST_DEBUGALIGNSIMDTEST_HH
--- /dev/null
+#include <config.h>
+
+#include <cstdint>
+#include <new>
+#include <tuple>
+#include <utility>
+
+#include <dune/common/debugalign.hh>
+#include <dune/common/hybridutilities.hh>
+#include <dune/common/parallel/mpihelper.hh>
+#include <dune/common/test/arithmetictestsuite.hh>
+#include <dune/common/test/testsuite.hh>
+
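+// RAII guard: installs the given handler as Dune::violatedAlignmentHandler()
+// for the lifetime of this object and restores the previous handler on
+// destruction.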
+class WithViolatedAlignmentHandler {
+ Dune::ViolatedAlignmentHandler oldhandler;
+public:
+ template<class H>
+ WithViolatedAlignmentHandler(H &&newhandler) :
+ oldhandler(Dune::violatedAlignmentHandler())
+ {
+ Dune::violatedAlignmentHandler() = std::forward<H>(newhandler);
+ }
+
+ WithViolatedAlignmentHandler(const WithViolatedAlignmentHandler &) = delete;
+ WithViolatedAlignmentHandler(WithViolatedAlignmentHandler &&) = delete;
+
+ WithViolatedAlignmentHandler&
+ operator=(const WithViolatedAlignmentHandler &) = delete;
+ WithViolatedAlignmentHandler&
+ operator=(WithViolatedAlignmentHandler &&) = delete;
+
+ ~WithViolatedAlignmentHandler()
+ {
+ Dune::violatedAlignmentHandler() = oldhandler;
+ }
+};
+
+// intentionally violate alignment and check that that is detected
+template<class T>
+void checkAlignmentViolation(Dune::TestSuite &test)
+{
+ bool misalignmentDetected = false;
+ WithViolatedAlignmentHandler
+ guard([&](auto&&...){ misalignmentDetected = true; });
+
+ char buffer[alignof(T)+sizeof(T)];
+
+ void* misalignedAddr;
+ {
+ // a more portable way to do this would be to use std::align(), but that
+ // isn't supported by g++-4.9 yet
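+ //
+ // Illustrative sketch of the std::align() variant (assumes <memory> were
+ // included; not used here):
+ // void* p = buffer;
+ // std::size_t space = sizeof(buffer);
+ // std::align(alignof(T), sizeof(T), p, space); // p: first aligned address
+ // misalignedAddr = static_cast<char*>(p) + 1; // then offset by one byte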
+ auto addr = std::uintptr_t( (void*)buffer );
+ addr += alignof(T) - 1;
+ addr &= -std::uintptr_t(alignof(T));
+ addr += 1;
+ misalignedAddr = (void*)addr;
+ }
+
+ auto ptr = new(misalignedAddr) T;
+ test.check(misalignmentDetected, "default construct")
+ << "misalignment not detected for " << Dune::className<T>();
+
+ misalignmentDetected = false;
+
+ ptr->~T();
+ test.check(misalignmentDetected, "destruct")
+ << "misalignment not detected for " << Dune::className<T>();
+
+ misalignmentDetected = false;
+
+ ptr = new(misalignedAddr) T(T(0));
+ test.check(misalignmentDetected, "move construct")
+ << "misalignment not detected for " << Dune::className<T>();
+ ptr->~T(); // ignore any misalignment here
+
+ misalignmentDetected = false;
+
+ T t(0);
+ ptr = new(misalignedAddr) T(t);
+ test.check(misalignmentDetected, "copy construct")
+ << "misalignment not detected for " << Dune::className<T>();
+ ptr->~T(); // ignore any misalignment here
+}
+
+int main(int argc, char **argv)
+{
+ Dune::MPIHelper::instance(argc, argv);
+
+ Dune::ArithmeticTestSuite test;
+
+ using ArithmeticTypes = std::tuple<
+ bool,
+ char, signed char, unsigned char,
+ short, unsigned short,
+ int, unsigned,
+ long, long unsigned,
+ long long, long long unsigned,
+ wchar_t, char16_t, char32_t,
+ float, double, long double>;
+
+ Dune::Hybrid::forEach(ArithmeticTypes(), [&](auto val) {
+ using T = decltype(val);
+ using Aligned = Dune::AlignedNumber<T>;
+ test.checkArithmetic<Aligned, T>();
+
+ checkAlignmentViolation<Aligned>(test);
+ });
+
+ return test.exit();
+}
--- /dev/null
+#include "config.h"
+
+#define DUNE_CHECK_BOUNDS
+
+#include <complex>
+#include <iostream>
+#include <vector>
+
+#include <dune/common/boundschecking.hh>
+#include <dune/common/classname.hh>
+#include <dune/common/diagonalmatrix.hh>
+#include <dune/common/dynmatrix.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/gmpfield.hh>
+
+template <class M>
+void populateMatrix(M &A, int rows, int cols) {
+ for (int i = 0; i < rows; ++i)
+ for (int j = 0; j < cols; ++j)
+ A[i][j] = i + 10 * j;
+}
+
+
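+// Matrix-like helper type: exposes static sizes via M()/N() and converts to a
+// FieldMatrix; used below to check that a FieldMatrix can be obtained from a
+// matrix-convertible type via its conversion operator.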
+template< class K, int rows, int cols >
+struct Foo
+{
+ constexpr static int M () noexcept { return cols; }
+ constexpr static int N () noexcept { return rows; }
+
+ operator Dune::FieldMatrix< K, rows, cols > () const
+ {
+ Dune::FieldMatrix< K, rows, cols > A;
+ populateMatrix( A, rows, cols );
+ return A;
+ }
+};
+
+struct Bar {};
+
+template <class A, class B>
+bool identicalContents(A const &a, B const &b) {
+ typedef typename A::size_type Size;
+
+ if (a.N() != b.N() or a.M() != b.M())
+ return false;
+
+ for (Size i = 0; i < a.N(); ++i)
+ for (Size j = 0; j < a.M(); ++j)
+ if (a[i][j] != b[i][j])
+ return false;
+ return true;
+}
+
+template <typename ft>
+bool run() {
+ ft const constant = 47.11;
+ std::cout << "Testing with type: " << Dune::className(constant) << std::endl;
+
+ Dune::FieldMatrix<ft, 2, 3> fieldM;
+ Dune::FieldMatrix<ft, 1, 1> fieldMWrong11;
+ Dune::FieldMatrix<ft, 2, 2> fieldMWrong22;
+ Dune::FieldMatrix<ft, 3, 3> fieldMWrong33;
+ populateMatrix(fieldM, 2, 3);
+ populateMatrix(fieldMWrong11, 1, 1);
+ populateMatrix(fieldMWrong22, 2, 2);
+ populateMatrix(fieldMWrong33, 3, 3);
+
+ Foo< ft, 2, 3 > fooM;
+ fieldM = static_cast< Dune::FieldMatrix< ft, 2, 3 > >( fooM );
+
+ Dune::DynamicMatrix<ft> dynM(2, 3);
+ Dune::DynamicMatrix<ft> dynMWrong11(1, 1);
+ Dune::DynamicMatrix<ft> dynMWrong22(2, 2);
+ Dune::DynamicMatrix<ft> dynMWrong33(3, 3);
+ populateMatrix(dynM, 2, 3);
+ populateMatrix(dynMWrong11, 1, 1);
+ populateMatrix(dynMWrong22, 2, 2);
+ populateMatrix(dynMWrong33, 3, 3);
+
+ Dune::DiagonalMatrix<ft, 1> const diagMWrong1 = {1};
+ Dune::DiagonalMatrix<ft, 2> const diagMWrong2 = {1, 2};
+ Dune::DiagonalMatrix<ft, 3> const diagMWrong3 = {1, 2, 3};
+
+ bool passed = true;
+
+ static_assert(!Dune::HasDenseMatrixAssigner< Dune::FieldMatrix<ft, 2, 3>, std::vector< Dune::FieldMatrix<ft, 2, 3> > >::value,
+ "FieldMatrix is not assignable by a std::vector< FieldMatrix >!");
+ static_assert(!Dune::HasDenseMatrixAssigner< Dune::FieldMatrix<ft, 2, 3>, Bar >::value,
+ "FieldMatrix is not assignable by a Bar!");
+ static_assert(Dune::HasDenseMatrixAssigner< Dune::FieldMatrix<ft, 2, 3>, Dune::FieldMatrix<ft, 2, 3> >::value,
+ "FieldMatrix is assignable by FieldMatrix!");
+ static_assert(Dune::HasDenseMatrixAssigner< Dune::FieldMatrix<ft, 2, 3>, Dune::DynamicMatrix<ft> >::value,
+ "FieldMatrix is assignable by a DynamicMatrix!");
+
+ // class: FieldMatrix
+ {
+ using M = Dune::FieldMatrix<ft, 2, 3>;
+
+ // Assignment
+ {
+ M fieldT;
+ fieldT = fieldM;
+ if (!identicalContents(fieldT, fieldM)) {
+ std::cout << "FAIL: Content mismatch on line: " << __LINE__
+ << std::endl;
+ passed = false;
+ }
+ }
+ {
+ M fieldT;
+ fieldT = dynM;
+ if (!identicalContents(fieldT, dynM)) {
+ std::cout << "FAIL: Content mismatch on line: " << __LINE__
+ << std::endl;
+ passed = false;
+ }
+ }
+ {
+ M fieldT;
+ fieldT = constant;
+ }
+
+ // Copy construction
+ {
+ M const fieldT = fieldM;
+ if (!identicalContents(fieldT, fieldM)) {
+ std::cout << "FAIL: Content mismatch on line: " << __LINE__
+ << std::endl;
+ passed = false;
+ }
+ }
+ {
+ M const fieldT = dynM;
+ if (!identicalContents(fieldT, dynM)) {
+ std::cout << "FAIL: Content mismatch on line: " << __LINE__
+ << std::endl;
+ passed = false;
+ }
+ }
+ {
+ [[maybe_unused]] M const fieldT = constant;
+ }
+ }
+
+ // class: DynamicMatrix
+ {
+ using M = Dune::DynamicMatrix<ft>;
+
+ // Assignment
+ {
+ M dynT;
+ dynT = fieldM;
+ if (!identicalContents(dynT, fieldM)) {
+ std::cout << "FAIL: Content mismatch on line: " << __LINE__
+ << std::endl;
+ passed = false;
+ }
+ dynT = fieldMWrong11;
+ if (!identicalContents(dynT, fieldMWrong11)) {
+ std::cout << "FAIL: Content mismatch on line: " << __LINE__
+ << std::endl;
+ passed = false;
+ }
+ }
+ {
+ M dynT;
+ dynT = dynM;
+ if (!identicalContents(dynT, dynM)) {
+ std::cout << "FAIL: Content mismatch on line: " << __LINE__
+ << std::endl;
+ passed = false;
+ }
+ }
+ {
+ M dynT;
+ dynT = constant;
+ }
+
+ // Copy construction
+ {
+ M const dynT = fieldM;
+ if (!identicalContents(dynT, fieldM)) {
+ std::cout << "FAIL: Content mismatch on line: " << __LINE__
+ << std::endl;
+ passed = false;
+ }
+ }
+ {
+ M const dynT = dynM;
+ if (!identicalContents(dynT, dynM)) {
+ std::cout << "FAIL: Content mismatch on line: " << __LINE__
+ << std::endl;
+ passed = false;
+ }
+ }
+ }
+
+ // Assignment from other classes
+ {
+ using M = Dune::FieldMatrix<ft, 3, 3>;
+ Dune::DiagonalMatrix<ft, 3> diagM({1, 2, 3});
+ { [[maybe_unused]] M const fieldT = diagM; }
+ {
+ M fieldT;
+ fieldT = diagM;
+ }
+ }
+ {
+ using M = Dune::DynamicMatrix<ft>;
+ Dune::DiagonalMatrix<ft, 3> diagM({1, 2, 3});
+ { [[maybe_unused]] M const dynT = diagM; }
+ {
+ M dynT;
+ dynT = diagM;
+ }
+ }
+
+ // Invalid assignments
+ {
+ using M = Dune::FieldMatrix<ft, 2, 3>;
+#ifdef FAILURE0
+ {
+ // Should fail at compile-time
+ M fieldT;
+ fieldT = fieldMWrong11;
+ }
+#endif
+#ifdef FAILURE1
+ {
+ // Should fail at compile-time
+ M fieldT;
+ fieldT = fieldMWrong22;
+ }
+#endif
+#ifdef FAILURE2
+ {
+ // Should fail at compile-time
+ M fieldT;
+ fieldT = fieldMWrong33;
+ }
+#endif
+ try {
+ // Should fail at run-time with RangeError
+ M fieldT;
+ fieldT = dynMWrong11;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ try {
+ // Should fail at run-time with RangeError
+ M fieldT;
+ fieldT = dynMWrong22;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ try {
+ // Should fail at run-time with RangeError
+ M fieldT;
+ fieldT = dynMWrong33;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+
+ try {
+ // Should fail at run-time with RangeError
+ // Note: this could be made to fail at compile-time already if
+ // we further specialised DenseMatrixAssigner to (FieldMatrix,
+ // DiagonalMatrix)
+ M fieldT;
+ fieldT = diagMWrong1;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ try {
+ // Should fail at run-time with RangeError
+ // Note: this could be made to fail at compile-time already if
+ // we further specialised DenseMatrixAssigner to (FieldMatrix,
+ // DiagonalMatrix)
+ M fieldT;
+ fieldT = diagMWrong2;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ try {
+ // Should fail at run-time with RangeError
+ // Note: this could be made to fail at compile-time already if
+ // we further specialised DenseMatrixAssigner to (FieldMatrix,
+ // DiagonalMatrix)
+ M fieldT;
+ fieldT = diagMWrong3;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ }
+
+ // Invalid copy construction
+ {
+ using M = Dune::FieldMatrix<ft, 2, 3>;
+#ifdef FAILURE3
+ {
+ // Should fail at compile-time
+ [[maybe_unused]] M const fieldT = fieldMWrong11;
+ }
+#endif
+#ifdef FAILURE4
+ {
+ // Should fail at compile-time
+ [[maybe_unused]] M const fieldT = fieldMWrong22;
+ }
+#endif
+#ifdef FAILURE5
+ {
+ // Should fail at compile-time
+ [[maybe_unused]] M const fieldT = fieldMWrong33;
+ }
+#endif
+ try {
+ // Should fail at run-time with RangeError
+ [[maybe_unused]] M const fieldT = dynMWrong11;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ try {
+ // Should fail at run-time with RangeError
+ [[maybe_unused]] M const fieldT = dynMWrong22;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ try {
+ // Should fail at run-time with RangeError
+ [[maybe_unused]] M const fieldT = dynMWrong33;
+ std::cout << "(line " << __LINE__ << ") Error: No exception thrown."
+ << std::endl;
+ passed = false;
+ } catch (const Dune::RangeError&) {
+ std::cout << "(line " << __LINE__
+ << ") All good: Exception thrown as expected." << std::endl;
+ }
+ }
+ {
+#ifdef FAILURE6
+ using M = Dune::DynamicMatrix<ft>;
+ {
+ // Should fail at compile-time
+ [[maybe_unused]] M const dynT = constant;
+ }
+#endif
+ }
+ std::cout << std::endl;
+ return passed;
+}
+
+int main() {
+ bool passed = true;
+ passed = passed && run<double>();
+ passed = passed && run<std::complex<double>>();
+#ifdef HAVE_GMP
+ passed = passed && run<Dune::GMPField<128u>>();
+#endif
+ return passed ? 0 : 1;
+}
--- /dev/null
+#include <iostream>
+
+#include <dune/common/densevector.hh>
+#include <dune/common/dynvector.hh>
+#include <dune/common/exceptions.hh>
+#include <dune/common/fvector.hh>
+
+using namespace Dune;
+
+template <class FirstVectorImp, class SecondVectorImp>
+void assign(DenseVector<FirstVectorImp>& first, const DenseVector<SecondVectorImp>& second)
+{
+ first = second;
+}
+
+bool run()
+{
+ bool passed = true;
+ FieldVector<double, 3> fvec1{1, 2, 3};
+ DynamicVector<double> dynvec1{1, 2, 3};
+ FieldVector<double, 3> fvec2;
+ DynamicVector<double> dynvec2(3);
+ // check mixed assignments
+ assign(fvec2, dynvec1);
+ assign(dynvec2, fvec1);
+ for (size_t i = 0; i < 3; ++i) {
+ if (fvec2[i] != dynvec1[i]) {
+ std::cerr << "Assigning a DynamicVector to a FieldVector as DenseVectors does not work!"
+ << std::endl << i << "-th entry after assignment is " << fvec2[i] << ", should be "
+ << i+1 << "!" << std::endl;
+ passed = false;
+ }
+ if (dynvec1[i] != dynvec2[i]) {
+ std::cerr << "Assigning a FieldVector to a DynamicVector as DenseVectors does not work"
+ << std::endl << i << "-th entry after assignment is " << dynvec1[i] << ", should be "
+ << i+1 << "!" << std::endl;
+ passed = false;
+ }
+ }
+ return passed;
+}
+
+int main()
+{
+ bool passed = run();
+ if (!passed)
+ DUNE_THROW(Dune::Exception, "Test failed");
+ return !passed;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <iostream>
+#include <algorithm>
+
+#include <dune/common/densevector.hh>
+#include <dune/common/exceptions.hh>
+
+class MyVector;
+
+namespace Dune
+{
+ template< >
+ struct DenseMatVecTraits< MyVector >
+ {
+ typedef MyVector derived_type;
+ typedef double value_type;
+ typedef unsigned int size_type;
+ };
+}
+
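+// A minimal user-defined vector: together with the DenseMatVecTraits
+// specialisation above, providing size() and operator[] is enough for the
+// CRTP base class Dune::DenseVector to supply the full vector interface.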
+class MyVector : public Dune::DenseVector< MyVector >
+{
+public:
+ MyVector ( unsigned int size, double v = 0 )
+ : data_( size, v ) {}
+
+ unsigned int size () const { return data_.size(); }
+
+ double& operator[] ( unsigned int i ) { return data_[ i ]; }
+ const double& operator[] ( unsigned int i ) const { return data_[ i ]; }
+protected:
+ std::vector< double > data_;
+};
+
+
+int main()
+{
+ try
+ {
+ unsigned int n = 15;
+ MyVector v( n, 1 );
+ if( ( v.end() - v.begin() ) < 0 )
+ DUNE_THROW(Dune::Exception, "Negative value reported for end() - begin()" );
+
+ return 0;
+ } catch (Dune::Exception& e) {
+ std::cerr << e << std::endl;
+ return 1;
+ } catch (...) {
+ std::cerr << "Generic exception!" << std::endl;
+ return 2;
+ }
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifndef DUNE_FMatrix_WITH_CHECKING
+#define DUNE_FMatrix_WITH_CHECKING
+#endif
+
+#include <iostream>
+#include <algorithm>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/diagonalmatrix.hh>
+
+#include "checkmatrixinterface.hh"
+
+using namespace Dune;
+
+template<class K, int n>
+void test_matrix()
+{
+ [[maybe_unused]] typedef typename DiagonalMatrix<K,n>::size_type size_type;
+
+ DiagonalMatrix<K,n> A(1);
+ FieldVector<K,n> f;
+ FieldVector<K,n> v;
+
+ // test constexpr size
+ static_assert(A.N() == n, "");
+ static_assert(A.M() == n, "");
+
+ // assign matrix
+ A=2;
+
+ // assign vector
+ f = 1;
+ v = 2;
+
+ // matrix vector product
+ A.umv(v,f);
+
+
+ // test norms
+ A.frobenius_norm();
+ A.frobenius_norm2();
+ A.infinity_norm();
+ A.infinity_norm_real();
+
+ std::sort(v.begin(), v.end());
+
+ // print matrix
+ std::cout << A << std::endl;
+ // print vector
+ std::cout << f << std::endl;
+
+ // assign to FieldMatrix
+ [[maybe_unused]] FieldMatrix<K,n,n> AFM = FieldMatrix<K,n,n>(A);
+ [[maybe_unused]] FieldMatrix<K,n,n> AFM2 = A;
+ [[maybe_unused]] FieldMatrix<K,n,n> AFM3;
+ AFM3 = A;
+}
+
+template<class K, int n>
+void test_interface()
+{
+ typedef CheckMatrixInterface::UseFieldVector<K,n,n> Traits;
+ typedef Dune::DiagonalMatrix<K,n> DiagonalMatrix;
+
+ const DiagonalMatrix A(1);
+ checkMatrixInterface< DiagonalMatrix >( A );
+ checkMatrixInterface< DiagonalMatrix, Traits >( A );
+}
+
+void test_initialisation()
+{
+ [[maybe_unused]] Dune::DiagonalMatrix<int, 2> const b = { 1, 2 };
+
+ assert(b.diagonal(0) == 1);
+ assert(b.diagonal(1) == 2);
+}
+
+int main()
+{
+ try {
+ test_matrix<float, 1>();
+ test_interface<float, 1>();
+ test_matrix<double, 1>();
+ test_interface<double, 1>();
+ test_matrix<double, 5>();
+ test_interface<double, 5>();
+ }
+ catch (Dune::Exception & e)
+ {
+ std::cerr << "Exception: " << e << std::endl;
+ }
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_DUMMYITERATOR_HH
+#define DUNE_COMMON_DUMMYITERATOR_HH
+
+#include <cstddef>
+#include <type_traits>
+
+#include <dune/common/iteratorfacades.hh>
+
+template<typename T>
+class dummyiterator
+ : public Dune::BidirectionalIteratorFacade<dummyiterator<T>, T, T&,
+ std::ptrdiff_t>
+{
+ friend class dummyiterator<const typename std::remove_const<T>::type>;
+
+ T *value;
+
+public:
+ dummyiterator(T& value_)
+ : value(&value_)
+ {}
+
+ template<typename T2>
+ dummyiterator
+ ( const dummyiterator<T2>& o,
+ typename std::enable_if<std::is_convertible<T2&, T&>::value>::type* = 0)
+ : value(o.value)
+ {}
+
+ T& dereference() const {
+ return *value;
+ }
+
+ bool equals(const dummyiterator& o) const {
+ return value == o.value;
+ }
+
+ void increment() {}
+ void decrement() {}
+};
+
+#endif // DUNE_COMMON_DUMMYITERATOR_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+// Activate checking
+#ifndef DUNE_FMatrix_WITH_CHECKING
+#define DUNE_FMatrix_WITH_CHECKING
+#endif
+
+#include <dune/common/dynmatrix.hh>
+#include <dune/common/dynvector.hh>
+#include <dune/common/ftraits.hh>
+#include <dune/common/fvector.hh>
+
+#include <iostream>
+#include <algorithm>
+#include <vector>
+
+#include "checkmatrixinterface.hh"
+
+using namespace Dune;
+
+template<typename T, std::size_t n>
+int test_invert_solve(Dune::DynamicMatrix<T> &A,
+ Dune::DynamicMatrix<T> &inv,
+ Dune::FieldVector<T, n> &x,
+ Dune::FieldVector<T, n> &b)
+{
+ int ret=0;
+
+ std::cout <<"Checking inversion of:"<<std::endl;
+
+ DynamicMatrix<T> calced_inv(n,n);
+ FieldVector<T,n> calced_x;
+
+ std::cout<<A<<std::endl;
+
+ // Check whether given inverse is correct
+ DynamicMatrix<T> prod = A;
+ prod.rightmultiply(inv);
+ for (size_t i=0; i<n; i++)
+ prod[i][i] -= 1;
+
+ bool equal=true;
+ if (prod.infinity_norm() > 1e-6) {
+ std::cerr<<"Given inverse wrong"<<std::endl;
+ equal=false;
+ }
+
+ DynamicMatrix<T> copy(A);
+ A.invert();
+
+ calced_inv = A;
+ A-=inv;
+
+
+ auto epsilon = std::numeric_limits<typename FieldTraits<T>::real_type>::epsilon();
+ auto tolerance = 10*epsilon;
+ for(size_t i =0; i < n; ++i)
+ for(size_t j=0; j <n; ++j)
+ if(std::abs(A[i][j])>tolerance) {
+ std::cerr<<"calculated inverse wrong at ("<<i<<","<<j<<")"<<std::endl;
+ equal=false;
+ }
+
+ if(!equal) {
+ ret++;
+ std::cerr<<"Calculated inverse was:"<<std::endl;
+ std::cerr <<calced_inv<<std::endl;
+ std::cerr<<"Should have been"<<std::endl;
+ std::cerr<<inv << std::endl;
+ }else
+ std::cout<<"Result is"<<std::endl<<calced_inv<<std::endl;
+
+
+ std::cout<<"Checking solution for rhs="<<b<<std::endl;
+
+ // Check whether given solution is correct
+ FieldVector<T,n> trhs=b;
+
+ copy.mmv(x,trhs);
+ equal=true;
+
+ if (trhs.infinity_norm() > 1e-6) {
+ std::cerr<<"Given rhs does not fit solution"<<std::endl;
+ equal=false;
+ }
+ copy.solve(calced_x, b);
+ FieldVector<T,n> xcopy(calced_x);
+ xcopy-=x;
+
+ equal=true;
+
+ for(size_t i =0; i < n; ++i)
+ if(std::abs(xcopy[i])>tolerance) {
+ std::cerr<<"calculated isolution wrong at ("<<i<<")"<<std::endl;
+ equal=false;
+ }
+
+ if(!equal) {
+ ret++;
+ std::cerr<<"Calculated solution was:"<<std::endl;
+ std::cerr <<calced_x<<std::endl;
+ std::cerr<<"Should have been"<<std::endl;
+ std::cerr<<x<<std::endl;
+ std::cerr<<"difference is "<<xcopy<<std::endl;
+ }else
+ std::cout<<"Result is "<<calced_x<<std::endl;
+
+ return ret;
+}
+
+
+int test_invert_solve()
+{
+ int ret = 0;
+
+ using DM = Dune::DynamicMatrix<double>;
+ using FV = Dune::FieldVector<double, 3>;
+
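+ // Each case provides a matrix A, its exact inverse, and a pair (x, b) with
+ // A*x = b; for instance A*{1,2,3} = {32,75,201} in the first case below.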
+ DM A = {{1, 5, 7}, {2, 14, 15}, {4, 40, 39}};
+ DM inv = {{-9.0 / 4, 85.0 / 24, -23.0 / 24},
+ {-3.0 / 4, 11.0 / 24, -1.0 / 24},
+ {1, -5.0 / 6, 1.0 / 6}};
+ FV b = {32, 75, 201};
+ FV x = {1, 2, 3};
+
+ ret += test_invert_solve<double, 3>(A, inv, x, b);
+
+ DM A0 = {{-0.5, 0, -0.25}, {0.5, 0, -0.25}, {0, 0.5, 0}};
+ DM inv0 = {{-1, 1, 0}, {0, 0, 2}, {-2, -2, 0}};
+ FV b0 = {32, 75, 201};
+ FV x0 = {43, 402, -214};
+
+ ret += test_invert_solve<double, 3>(A0, inv0, x0, b0);
+
+ DM A1 = {{0, 1, 0}, {1, 0, 0}, {0, 0, 1}};
+ FV b1 = {0, 1, 2};
+ FV x1 = {1, 0, 2};
+
+ ret += test_invert_solve<double, 3>(A1, A1, x1, b1);
+
+ DM A2 = {{3, 1, 6}, {2, 1, 3}, {1, 1, 1}};
+ DM inv2 = {{-2, 5, -3}, {1, -3, 3}, {1, -2, 1}};
+ FV b2 = {2, 7, 4};
+ FV x2 = {19, -7, -8};
+
+ return ret + test_invert_solve<double, 3>(A2, inv2, x2, b2);
+}
+
+template<class K, class X, class Y>
+void test_mult(DynamicMatrix<K>& A,
+ X& v, Y& f)
+{
+ // test the various matrix-vector products
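+ // naming scheme: mv assigns f = A*v, umv adds and mmv subtracts A*v,
+ // usmv adds a scaled product; the *t*/*h* variants apply the transposed
+ // respectively conjugate-transposed matrix and hence swap the roles of v and f.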
+ A.mv(v,f);
+ A.mtv(f,v);
+ A.umv(v,f);
+ A.umtv(f,v);
+ A.umhv(f,v);
+ A.mmv(v,f);
+ A.mmtv(f,v);
+ A.mmhv(f,v);
+ A.usmv((K)0.5,v,f);
+ A.usmtv((K)0.5,f,v);
+ A.usmhv((K)0.5,f,v);
+}
+
+
+template<class K, int n, int m>
+void test_matrix()
+{
+ typedef typename DynamicMatrix<K>::size_type size_type;
+
+ DynamicMatrix<K> A(n,m);
+ DynamicVector<K> f(n);
+ DynamicVector<K> v(m);
+
+ // assign matrix
+ A=K();
+ // random access matrix
+ for (size_type i=0; i<n; i++)
+ for (size_type j=0; j<m; j++)
+ A[i][j] = i*j;
+ // iterator matrix
+ typename DynamicMatrix<K>::RowIterator rit = A.begin();
+ for (; rit!=A.end(); ++rit)
+ {
+ rit.index();
+ typename DynamicMatrix<K>::ColIterator cit = rit->begin();
+ for (; cit!=rit->end(); ++cit)
+ {
+ cit.index();
+ (*cit) *= 2;
+ }
+ }
+
+ // assign vector
+ f = 1;
+
+ // random access vector
+ for (size_type i=0; i<v.dim(); i++)
+ v[i] = i;
+ // iterator vector
+ typename DynamicVector<K>::iterator it = v.begin();
+ typename DynamicVector<K>::ConstIterator end = v.end();
+ for (; it!=end; ++it)
+ {
+ it.index();
+ (*it) *= 2;
+ }
+ // reverse iterator vector
+ it = v.beforeEnd();
+ end = v.beforeBegin();
+ for (; it!=end; --it)
+ (*it) /= 2;
+ // find vector
+ for (size_type i=0; i<v.dim(); i++)
+ {
+ it = v.find(i);
+ (*it) += 1;
+ }
+
+ // matrix vector product
+ A.umv(v,f);
+ // check that mv and umv are doing the same thing
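+ // mv assigns the product while umv accumulates it, so applied to a
+ // zero-initialised result both calls must produce the same vector A*b.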
+ {
+ DynamicVector<K> res2(n,0);
+ DynamicVector<K> res1(n);
+
+ DynamicVector<K> b(m,1);
+
+ A.mv(b, res1);
+ A.umv(b, res2);
+
+ if( (res1 - res2).two_norm() > 1e-12 )
+ {
+ DUNE_THROW(FMatrixError,"mv and umv are not doing the same!");
+ }
+ }
+
+ {
+ FieldVector<K,m> v0;
+ for (size_t i=0; i<m; i++) v0[i] = v[i];
+ test_mult(A, v0, f );
+ }
+
+ {
+ DynamicVector<K> v0 ( v );
+ test_mult(A, v0, f );
+ }
+
+ // {
+ // std::vector<K> v1( m ) ;
+ // std::vector<K> f1( n, 1 ) ;
+ // // random access vector
+ // for (size_type i=0; i<v1.size(); i++) v1[i] = i;
+ // test_mult(A, v1, f1 );
+ // }
+ // {
+ // K v2[ m ];
+ // K f2[ n ];
+ // // random access vector
+ // for (size_type i=0; i<m; ++i) v2[i] = i;
+ // for (size_type i=0; i<n; ++i) f2[i] = 1;
+ // test_mult(A, v2, f2 );
+ // }
+
+ // Test the different matrix norms
+ assert( A.frobenius_norm() >= 0 );
+ assert( A.frobenius_norm2() >= 0 );
+ assert( A.infinity_norm() >= 0 );
+ assert( A.infinity_norm_real() >= 0);
+
+ std::sort(v.begin(), v.end());
+
+ // print matrix
+ std::cout << A << std::endl;
+ // print vector
+ std::cout << f << std::endl;
+
+
+ {
+ DynamicMatrix<K> A2 = A;
+ A2 *= 2;
+
+ DynamicMatrix<K> B = A;
+ B += A;
+ B -= A2;
+ if (std::abs(B.infinity_norm()) > 1e-12)
+ DUNE_THROW(FMatrixError,"Operator +=/-= test failed!");
+ }
+ {
+ DynamicMatrix<K> A3 = A;
+ A3 *= 3;
+
+ DynamicMatrix<K> B = A;
+ B.axpy( K( 2 ), B );
+ B -= A3;
+ if (std::abs(B.infinity_norm()) > 1e-12)
+ DUNE_THROW(FMatrixError,"Axpy test failed!");
+ }
+ {
+ DynamicMatrix<K> A2(n,n+1);
+ for(size_type i=0; i<A2.N(); ++i)
+ for(size_type j=0; j<A2.M(); ++j)
+ A2[i][j] = i;
+ [[maybe_unused]] const DynamicMatrix<K>& Aref = A2;
+
+
+ DynamicMatrix<K> B(n+1,n+1);
+ for(size_type i=0; i<B.N(); ++i)
+ for(size_type j=0; j<B.M(); ++j)
+ B[i][j] = i;
+ [[maybe_unused]] const DynamicMatrix<K>& Bref = B;
+
+ DynamicMatrix<K> C(n,n);
+ for(size_type i=0; i<C.N(); ++i)
+ for(size_type j=0; j<C.M(); ++j)
+ C[i][j] = i;
+ [[maybe_unused]] const DynamicMatrix<K>& Cref = C;
+
+#if 0
+ DynamicMatrix<K> AB = Aref.rightmultiplyany(B);
+ for(size_type i=0; i<AB.N(); ++i)
+ for(size_type j=0; j<AB.M(); ++j)
+ if (std::abs(AB[i][j] - K(i*n*(n+1)/2)) > 1e-10)
+ DUNE_THROW(FMatrixError,"Rightmultiplyany test failed!");
+
+ DynamicMatrix<K> AB2 = A2;
+ AB2.rightmultiply(B);
+ AB2 -= AB;
+ if (std::abs(AB2.infinity_norm()) > 1e-10)
+ DUNE_THROW(FMatrixError,"Rightmultiply test failed!");
+
+ DynamicMatrix<K> AB3 = Bref.leftmultiplyany(A2);
+ AB3 -= AB;
+ if (std::abs(AB3.infinity_norm()) > 1e-10)
+ DUNE_THROW(FMatrixError,"Leftmultiplyany test failed!");
+
+ DynamicMatrix<K> CA = Aref.leftmultiplyany(C);
+ for(size_type i=0; i<CA.N(); ++i)
+ for(size_type j=0; j<CA.M(); ++j)
+ if (std::abs(CA[i][j] - K(i*n*(n-1)/2)) > 1e-10)
+ DUNE_THROW(FMatrixError,"Leftmultiplyany test failed!");
+
+ DynamicMatrix<K> CA2 = A2;
+ CA2.leftmultiply(C);
+ CA2 -= CA;
+ if (std::abs(CA2.infinity_norm()) > 1e-10)
+ DUNE_THROW(FMatrixError,"Leftmultiply test failed!");
+
+ DynamicMatrix<K> CA3 = Cref.rightmultiplyany(A2);
+ CA3 -= CA;
+ if (std::abs(CA3.infinity_norm()) > 1e-10)
+ DUNE_THROW(FMatrixError,"Rightmultiplyany test failed!");
+#endif
+ }
+}
+
+int test_determinant()
+{
+ int ret = 0;
+
+ DynamicMatrix<double> B(4,4);
+ B[0][0] = 3.0; B[0][1] = 0.0; B[0][2] = 1.0; B[0][3] = 0.0;
+ B[1][0] = -1.0; B[1][1] = 3.0; B[1][2] = 0.0; B[1][3] = 0.0;
+ B[2][0] = -3.0; B[2][1] = 0.0; B[2][2] = -1.0; B[2][3] = 2.0;
+ B[3][0] = 0.0; B[3][1] = -1.0; B[3][2] = 0.0; B[3][3] = 1.0;
+ if (std::abs(B.determinant() + 2.0) > 1e-12)
+ {
+ std::cerr << "Determinant 1 test failed" << std::endl;
+ ++ret;
+ }
+
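+ // This matrix is singular: the fourth row is the sum of the first three,
+ // so the determinant has to vanish.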
+ B[0][0] = 3.0; B[0][1] = 0.0; B[0][2] = 1.0; B[0][3] = 0.0;
+ B[1][0] = -1.0; B[1][1] = 3.0; B[1][2] = 0.0; B[1][3] = 0.0;
+ B[2][0] = -3.0; B[2][1] = 0.0; B[2][2] = -1.0; B[2][3] = 2.0;
+ B[3][0] = -1.0; B[3][1] = 3.0; B[3][2] = 0.0; B[3][3] = 2.0;
+ if (B.determinant() != 0.0)
+ {
+ std::cerr << "Determinant 2 test failed" << std::endl;
+ ++ret;
+ }
+
+ return ret;
+}
+
+int main()
+{
+ try {
+ Dune::DynamicMatrix<double> A( 5, 5 );
+ checkMatrixInterface( A );
+
+ test_matrix<float, 1, 1>();
+ test_matrix<double, 1, 1>();
+ test_matrix<int, 10, 5>();
+ test_matrix<double, 5, 10>();
+ test_determinant();
+ Dune::DynamicMatrix<double> B(34, 34, 1e-15);
+ for (int i=0; i<34; i++) B[i][i] = 1;
+ B.invert();
+ return test_invert_solve();
+ }
+ catch (Dune::Exception & e)
+ {
+ std::cerr << "Exception: " << e << std::endl;
+ }
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <iostream>
+
+#include <dune/common/dynvector.hh>
+#include <dune/common/exceptions.hh>
+
+using Dune::DynamicVector;
+
+template<class ct>
+void dynamicVectorTest(int d) {
+ ct a = 1;
+ DynamicVector<ct> v(d,1);
+ DynamicVector<ct> w(d,2);
+ DynamicVector<ct> z(d,2);
+ [[maybe_unused]] bool b;
+
+ // Test whether the norm methods compile
+ (w+v).two_norm();
+ (w+v).two_norm2();
+ (w+v).one_norm();
+ (w+v).one_norm_real();
+ (w+v).infinity_norm();
+ (w+v).infinity_norm_real();
+
+ // test op(vec,vec)
+ z = v + w;
+ z = v - w;
+ DynamicVector<ct> z2 = v + w;
+ w -= v;
+ w += v;
+
+ // test op(vec,scalar)
+ w +=a;
+ w -= a;
+ w *= a;
+ w /= a;
+
+ // test scalar product, axpy
+ a = v * w;
+ z = v.axpy(a,w);
+
+ // test comparison
+ b = (w != v);
+ b = (w == v);
+
+
+ // test istream operator
+ std::stringstream s;
+ for (int i=0; i<d; i++)
+ {
+ s << i << " ";
+ v[i] = i;
+ }
+ s >> w;
+ assert(v == w);
+
+}
+
+int main()
+{
+ try {
+ for (int d=1; d<6; d++)
+ {
+ dynamicVectorTest<int>(d);
+ dynamicVectorTest<float>(d);
+ dynamicVectorTest<double>(d);
+ }
+ } catch (Dune::Exception& e) {
+ std::cerr << e << std::endl;
+ return 1;
+ } catch (...) {
+ std::cerr << "Generic exception!" << std::endl;
+ return 2;
+ }
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <dune/common/fvector.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/dynmatrixev.hh>
+#include <dune/common/fmatrixev.hh>
+
+#include <algorithm>
+#include <limits>
+#include <list>
+#include <complex>
+
+using namespace Dune;
+
+#if HAVE_LAPACK
+/** \brief Test the eigenvalue code with the Rosser test matrix
+
+ This matrix was a challenge for many matrix eigenvalue
+ algorithms. But the Francis QR algorithm, as perfected by
+ Wilkinson and implemented in EISPACK, has no trouble with it. The
+ matrix is 8-by-8 with integer elements. It has:
+
+ * A double eigenvalue
+ * Three nearly equal eigenvalues
+ * Dominant eigenvalues of opposite sign
+ * A zero eigenvalue
+ * A small, nonzero eigenvalue
+
+*/
+template<typename ft>
+void testRosserMatrix()
+{
+ DynamicMatrix<ft> A = {
+ { 611, 196, -192, 407, -8, -52, -49, 29 },
+ { 196, 899, 113, -192, -71, -43, -8, -44 },
+ { -192, 113, 899, 196, 61, 49, 8, 52 },
+ { 407, -192, 196, 611, 8, 44, 59, -23 },
+ { -8, -71, 61, 8, 411, -599, 208, 208 },
+ { -52, -43, 49, 44, -599, 411, 208, 208 },
+ { -49, -8, 8, 59, 208, 208, 99, -911 },
+ { 29, -44, 52, -23, 208, 208, -911, 99}
+ };
+
+ // compute eigenvalues
+ DynamicVector<std::complex<ft> > eigenComplex;
+ DynamicMatrixHelp::eigenValuesNonSym(A, eigenComplex);
+
+ // test results
+ /*
+ reference solution computed with octave 3.2
+
+ > format long e
+ > eig(rosser())
+
+ */
+ std::vector<ft> reference = {
+ -1.02004901843000e+03,
+ -4.14362871168386e-14,
+ 9.80486407214362e-02,
+ 1.00000000000000e+03,
+ 1.00000000000000e+03,
+ 1.01990195135928e+03,
+ 1.02000000000000e+03,
+ 1.02004901843000e+03
+ };
+
+ std::vector<ft> eigenRealParts(8);
+ for (int i=0; i<8; i++)
+ eigenRealParts[i] = std::real(eigenComplex[i]);
+
+ std::sort(eigenRealParts.begin(), eigenRealParts.end());
+
+ for (int i=0; i<8; i++)
+ {
+ if (std::fabs(std::imag(eigenComplex[i])) > 1e-10)
+ DUNE_THROW(MathError, "Symmetric matrix has complex eigenvalue");
+
+ if( std::fabs(reference[i] - eigenRealParts[i]) > 1e-10 )
+ DUNE_THROW(MathError,"error computing eigenvalues of Rosser-matrix");
+ }
+
+ std::cout << "Eigenvalues of Rosser matrix: " << eigenComplex << std::endl;
+}
+#endif // HAVE_LAPACK
+
+template <class field_type, int dim>
+void testSymmetricFieldMatrix()
+{
+ int numberOfTestMatrices = 10;
+
+ for (int i=0; i<numberOfTestMatrices; i++)
+ {
+ // Construct pseudo-random symmetric test matrix
+ FieldMatrix<field_type,dim,dim> testMatrix;
+ for (int j=0; j<dim; j++)
+ for (int k=j; k<dim; k++)
+ testMatrix[j][k] = testMatrix[k][j] = ((int)(M_PI*j*k*i))%100 - 1;
+
+ FieldVector<field_type,dim> eigenValues;
+ FieldMatrix<field_type,dim,dim> eigenVectors;
+ FMatrixHelp::eigenValuesVectors(testMatrix, eigenValues, eigenVectors);
+
+ // Make sure the compute numbers really are the eigenvalues
+ /*for (int j=0; j<dim; j++)
+ {
+ FieldMatrix<field_type,dim,dim> copy = testMatrix;
+ for (int k=0; k<dim; k++)
+ copy[k][k] -= eigenValues[j];
+
+ if (std::fabs(copy.determinant()) > 1e-8)
+ DUNE_THROW(MathError, "Value computed by FMatrixHelp::eigenValues is not an eigenvalue, Determinant: "+std::to_string(std::fabs(copy.determinant())));
+ }*/
+
+ // Make sure eigenvalues and eigenvectors are not NaN (the subsequent tests do not find this!)
+ for (int j=0; j<dim; j++)
+ {
+ using std::isnan;
+ if (isnan(eigenValues[j]))
+ DUNE_THROW(MathError, j << "-th eigenvalue is NaN!");
+
+ for (std::size_t k=0; k<dim; k++)
+ if (isnan(eigenVectors[j][k]))
+ DUNE_THROW(MathError, j << "-th eigenvector contains NaN!");
+ }
+
+ // Make sure the eigenvalues are in ascending order
+ for (int j=0; j<dim-1; j++)
+ if (eigenValues[j] > eigenValues[j+1] + 1e-10)
+ DUNE_THROW(MathError, "Values computed by FMatrixHelp::eigenValues are not in ascending order");
+
+ // Make sure the vectors really are eigenvectors for the computed eigenvalues
+ for (int j=0; j<dim; j++)
+ {
+ FieldVector<field_type, dim> Av;
+ testMatrix.mv(eigenVectors[j], Av);
+ if((Av - eigenValues[j]*eigenVectors[j]).two_norm() > dim*std::sqrt(std::numeric_limits<field_type>::epsilon()))
+ DUNE_THROW(MathError, "Vector computed by FMatrixHelp::eigenValuesVectors is not an eigenvector");
+ }
+
+ // Make sure the eigenvectors have unit length
+ for(auto& ev : eigenVectors) {
+ constexpr double tol = std::max<double>(std::numeric_limits<field_type>::epsilon(),
+ std::numeric_limits<double>::epsilon());
+ if(std::abs(ev.two_norm() - 1) > dim*tol)
+ DUNE_THROW(MathError, "Vector computed by FMatrixHelp::eigenValuesVectors does not have unit length");
+ }
+
+ }
+}
+
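+// Compare a set of computed eigenvectors against a reference set: for every
+// group of equal reference eigenvalues, each computed eigenvector has to agree
+// with one of the reference eigenvectors of that group up to its sign.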
+template<typename field_type, int dim>
+void compareEigenvectorSets(FieldMatrix<field_type,dim,dim> evec,
+ FieldVector<field_type,dim> refEval,
+ FieldMatrix<field_type,dim,dim> refEvec)
+{
+ field_type th = dim*std::sqrt(std::numeric_limits<field_type>::epsilon());
+
+ std::size_t i=0;
+ std::size_t shift;
+ std::list<FieldVector<field_type,dim>> refEvecList;
+ field_type currentEval;
+
+ while(i<dim) {
+ shift=i;
+ currentEval = refEval[i];
+ while(i<dim && refEval[i]==currentEval) {
+ refEvecList.push_back(refEvec[i]);
+ ++i;
+ }
+ for(std::size_t j=0; j<refEvecList.size(); ++j) {
+ bool found = false;
+ auto it = refEvecList.begin();
+ while(!found && it != refEvecList.end()) {
+ if((evec[shift+j]-*it).two_norm() < th || (-1.0*evec[shift+j]-*it).two_norm() < th)
+ found = true;
+ else
+ ++it;
+ }
+ if(!found)
+ DUNE_THROW(MathError, "Eigenvector [" << evec[j] << "] for eigenvalue "
+ << currentEval << " not found within the reference solutions [" << refEvec << "]");
+ }
+ refEvecList.clear();
+ }
+}
+
+template<typename field_type, int dim>
+void checkMatrixWithReference(FieldMatrix<field_type, dim, dim> matrix,
+ FieldMatrix<field_type, dim, dim> refEvec,
+ FieldVector<field_type, dim> refEval)
+{
+ //normalize reference
+ for(auto& ev : refEvec)
+ ev /= ev.two_norm();
+
+ field_type th = dim*std::sqrt(std::numeric_limits<field_type>::epsilon());
+
+ FieldMatrix<field_type,dim,dim> eigenvectors;
+ FieldVector<field_type,dim> eigenvalues;
+
+ FMatrixHelp::eigenValuesVectors(matrix, eigenvalues, eigenvectors);
+
+ if((eigenvalues-refEval).two_norm() > th)
+ DUNE_THROW(MathError, "Eigenvalues [" << eigenvalues << "] computed by FMatrixHelp::eigenValuesVectors do not match the reference solution [" << refEval << "]");
+ try {
+ compareEigenvectorSets(eigenvectors, refEval, refEvec);
+ }
+ catch(Dune::MathError& e) {
+ std::cerr << "Computations by `FMatrixHelp::eigenValuesVectors`: " << e.what() << std::endl;
+ }
+}
+
+template<typename field_type, int dim>
+void checkMatrixWithLAPACK(FieldMatrix<field_type, dim, dim> matrix)
+{
+ field_type th = dim*std::sqrt(std::numeric_limits<field_type>::epsilon());
+
+ FieldMatrix<field_type,dim,dim> eigenvectors, refEvec;
+ FieldVector<field_type,dim> eigenvalues, refEval;
+
+ FMatrixHelp::eigenValuesVectors(matrix, eigenvalues, eigenvectors);
+ FMatrixHelp::eigenValuesVectorsLapack(matrix, refEval, refEvec);
+
+ if((eigenvalues-refEval).two_norm() > th)
+ DUNE_THROW(MathError, "Eigenvalues [" << eigenvalues << "] computed by FMatrixHelp::eigenValuesVectorsLapack do not match the reference solution [" << refEval << "]");
+ try {
+ compareEigenvectorSets(eigenvectors, refEval, refEvec);
+ }
+ catch(Dune::MathError& e) {
+ std::cerr << "Computations by `FMatrixHelp::eigenValuesVectorsLapack`: " << e.what() << std::endl;
+ }
+}
+
+template<class FT>
+void checkMultiplicity()
+{
+ //--2d--
+ //repeated eigenvalue (x2)
+ checkMatrixWithReference<FT,2>({{1, 0},{0, 1}}, {{1,0}, {0,1}}, {1, 1});
+
+ //eigenvalues with same magnitude (x2)
+ checkMatrixWithReference<FT,2>({{0, 1}, {1, 0}}, {{1,-1}, {1,1}}, {-1, 1});
+
+ // singular matrix
+ checkMatrixWithReference<FT,2>({{1, 0},{0, 0}}, {{0,1}, {1,0}}, {0, 1});
+
+ // another singular matrix (triggers a different code path)
+ checkMatrixWithReference<FT,2>({{0, 0},{0, 1}}, {{1,0}, {0,1}}, {0, 1});
+
+ // Seemingly simple diagonal matrix -- triggers unstable detection of zero columns
+ checkMatrixWithReference<FT,2>({{1.01, 0},{0, 1}}, {{0,1}, {1,0}}, {1, 1.01});
+
+ // check 2x2 zero matrix
+ checkMatrixWithReference<FT,2>({{ 0, 0},
+ { 0, 0}},
+ {{1,0}, {0,1}},
+ {0, 0});
+
+ //--3d--
+ //repeated eigenvalue (x3)
+ checkMatrixWithReference<FT,3>({{ 1, 0, 0},
+ { 0, 1, 0},
+ { 0, 0, 1}},
+ {{1,0,0}, {0,1,0}, {0,0,1}},
+ {1, 1, 1});
+
+ //eigenvalues with same magnitude (x2)
+ checkMatrixWithReference<FT,3>({{ 0, 1, 0},
+ { 1, 0, 0},
+ { 0, 0, 5}},
+ {{-1,1,0}, {1,1,0}, {0,0,1}},
+ {-1, 1, 5});
+
+ //repeated eigenvalue (x2)
+ checkMatrixWithReference<FT,3>({{ 3, -2, 0},
+ { -2, 3, 0},
+ { 0, 0, 5}},
+ {{1,1,0}, {0,0,1}, {1,-1,0}},
+ {1, 5, 5});
+
+ // singular non-diagonal matrix
+ checkMatrixWithReference<FT,3>({{ 0, 0, 0},
+ { 0, 1, 1},
+ { 0, 1, 1}},
+ {{1,0,0}, {0,FT(-1.0/std::sqrt(2.0)),FT(1.0/std::sqrt(2.0))}, {0,FT(1.0/std::sqrt(2.0)),FT(1.0/std::sqrt(2.0))}},
+ {0, 0, 2});
+
+ // singular diagonal matrix (that's a different code path again)
+ checkMatrixWithReference<FT,3>({{ 0, 0, 0},
+ { 0, 1, 0},
+ { 0, 0, 0}},
+ {{1,0,0}, {0,0,1}, {0,1,0}},
+ {0, 0, 1});
+
+ // diagonal matrix whose largest eigenvalue is not 1
+ // this tests the matrix scaling employed by the eigenvector code.
+ checkMatrixWithReference<FT,3>({{ 3, 0, 0},
+ { 0, 2, 0},
+ { 0, 0, 4}},
+ {{0,1,0}, {1,0,0}, {0,0,1}},
+ {2, 3, 4});
+
+ // check 3x3 zero matrix
+ checkMatrixWithReference<FT,3>({{ 0, 0, 0},
+ { 0, 0, 0},
+ { 0, 0, 0}},
+ {{1,0,0}, {0,1,0}, {0,0,1}},
+ {0, 0, 0});
+
+ //repeat tests with LAPACK (if found)
+#if HAVE_LAPACK
+ checkMatrixWithLAPACK<FT,2>({{1, 0}, {0, 1}});
+ checkMatrixWithLAPACK<FT,2>({{0, 1}, {1, 0}});
+ checkMatrixWithLAPACK<FT,3>({{1,0,0}, {0,1,0}, {0,0,1}});
+ checkMatrixWithLAPACK<FT,3>({{0,1,0}, {1,0,0}, {0,0,5}});
+ checkMatrixWithLAPACK<FT,3>({{3,-2,0}, {-2,3,0}, {0,0,5}});
+#endif
+
+}
+
+int main()
+{
+#if HAVE_LAPACK
+ testRosserMatrix<double>();
+ testRosserMatrix<float>();
+ testRosserMatrix<long double>();
+#else
+ std::cout << "WARNING: eigenvaluetest needs LAPACK, test disabled" << std::endl;
+#endif // HAVE_LAPACK
+
+ //we basically just test LAPACK here, so maybe discard those tests
+#if HAVE_LAPACK
+ testSymmetricFieldMatrix<double,4>();
+ testSymmetricFieldMatrix<double,200>();
+ testSymmetricFieldMatrix<float,4>();
+ testSymmetricFieldMatrix<float,200>();
+ testSymmetricFieldMatrix<long double,4>();
+ testSymmetricFieldMatrix<long double,200>();
+#endif // HAVE_LAPACK
+
+ testSymmetricFieldMatrix<double,2>();
+ testSymmetricFieldMatrix<double,3>();
+ testSymmetricFieldMatrix<float,2>();
+ testSymmetricFieldMatrix<float,3>();
+ testSymmetricFieldMatrix<long double,2>();
+ testSymmetricFieldMatrix<long double,3>();
+
+ checkMultiplicity<double>();
+ checkMultiplicity<float>();
+ checkMultiplicity<long double>();
+
+ return 0;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <dune/common/enumset.hh>
+#include <iostream>
+int main()
+{
+ using namespace Dune;
+ std::cout<<Combine<EnumItem<int,1>,EnumItem<int,2>,int>::contains(1)<<
+ " "<<Combine<EnumItem<int,1>,EnumItem<int,2>,int>::contains(2)<<
+ " "<<Combine<Combine<EnumItem<int,1>,EnumItem<int,2>,int>,EnumItem<int,0>,int>::contains(3)<<
+ " "<<EnumRange<int,1,3>::contains(3)<<std::endl;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <array>
+#include <iostream>
+#include <type_traits>
+
+#include <dune/common/filledarray.hh>
+
+int main() {
+
+ int status = 0;
+
+ auto test1 = Dune::filledArray<2>(2.0);
+ static_assert(std::is_same<decltype(test1), std::array<double, 2> >::value,
+ "Wrong result type for Dune::filledArray()");
+
+ if(test1[0] != 2.0 || test1[1] != 2.0)
+ {
+ std::cerr << "Dune::filledArray() produces wrong value" << std::endl;
+ status = 1;
+ }
+
+#ifdef __cpp_lib_array_constexpr
+ std::cout << "The result of Dune::filledArray() is constexpr" << std::endl;
+ constexpr auto test2 = Dune::filledArray<2>(2);
+ (void)test2;
+#else // !__cpp_lib_array_constexpr
+ std::cout << "Not checking whether Dune::filledArray() is constexpr\n"
+ << "since the library does not declare std::array as constexpr\n"
+ << "(__cpp_lib_array_constexpr is not defined)." << std::endl;
+#endif // !__cpp_lib_array_constexpr
+
+ return status;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+// Activate checking.
+#ifndef DUNE_FMatrix_WITH_CHECKING
+#define DUNE_FMatrix_WITH_CHECKING
+#endif
+
+#include <algorithm>
+#include <cassert>
+#include <complex>
+#include <iostream>
+#include <vector>
+
+#include <dune/common/classname.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/fmatrixev.hh>
+#include <dune/common/ftraits.hh>
+#include <dune/common/quadmath.hh>
+#include <dune/common/rangeutilities.hh>
+#include <dune/common/simd/loop.hh>
+#include <dune/common/simd/simd.hh>
+#if HAVE_VC
+#include <dune/common/simd/vc.hh>
+#endif
+
+#include "checkmatrixinterface.hh"
+
+using namespace Dune;
+
+template<typename T, std::size_t n>
+int test_invert_solve(Dune::FieldMatrix<T, n, n> &A,
+ Dune::FieldMatrix<T, n, n> &inv,
+ Dune::FieldVector<T, n> &x,
+ Dune::FieldVector<T, n> &b,
+ bool doPivoting = true)
+{
+ using std::abs;
+
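+ // T may be a scalar or a SIMD vector type (e.g. Vc::SimdArray further below),
+ // hence all checks reduce their per-lane comparisons with Simd::anyTrue.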
+ int ret=0;
+
+ std::cout <<"Checking inversion of:"<<std::endl;
+
+ FieldMatrix<T,n,n> calced_inv;
+ FieldVector<T,n> calced_x;
+
+ std::cout<<A<<std::endl;
+
+ // Check whether given inverse is correct
+ FieldMatrix<T,n,n> prod = A;
+ prod.rightmultiply(inv);
+ for (size_t i=0; i<n; i++)
+ prod[i][i] -= 1;
+
+ bool equal=true;
+ if (Simd::anyTrue(prod.infinity_norm() > 1e-6)) {
+ std::cerr<<"Given inverse wrong"<<std::endl;
+ equal=false;
+ }
+
+ FieldMatrix<T,n,n> copy(A);
+ A.invert(doPivoting);
+
+ calced_inv = A;
+ A-=inv;
+
+
+ auto epsilon = std::numeric_limits<typename FieldTraits<T>::real_type>::epsilon();
+ auto tolerance = 10*epsilon;
+ for(size_t i =0; i < n; ++i)
+ for(size_t j=0; j <n; ++j)
+ if(Simd::anyTrue(abs(A[i][j])>tolerance)) {
+ std::cerr<<"calculated inverse wrong at ("<<i<<","<<j<<")"<<std::endl;
+ equal=false;
+ }
+
+ if(!equal) {
+ ret++;
+ std::cerr<<"Calculated inverse was:"<<std::endl;
+ std::cerr <<calced_inv<<std::endl;
+ std::cerr<<"Should have been"<<std::endl;
+ std::cerr<<inv << std::endl;
+ }else
+ std::cout<<"Result is"<<std::endl<<calced_inv<<std::endl;
+
+
+ std::cout<<"Checking solution for rhs="<<b<<std::endl;
+
+ // Check whether given solution is correct
+ FieldVector<T,n> trhs=b;
+
+ copy.mmv(x,trhs);
+ equal=true;
+
+ if (Simd::anyTrue(trhs.infinity_norm() > 1e-6)) {
+ std::cerr<<"Given rhs does not fit solution"<<std::endl;
+ equal=false;
+ }
+ copy.solve(calced_x, b, doPivoting);
+ FieldVector<T,n> xcopy(calced_x);
+ xcopy-=x;
+
+ equal=true;
+
+ for(size_t i =0; i < n; ++i)
+ if(Simd::anyTrue(abs(xcopy[i])>tolerance)) {
+ std::cerr<<"calculated isolution wrong at ("<<i<<")"<<std::endl;
+ equal=false;
+ }
+
+ if(!equal) {
+ ret++;
+ std::cerr<<"Calculated solution was:"<<std::endl;
+ std::cerr <<calced_x<<std::endl;
+ std::cerr<<"Should have been"<<std::endl;
+ std::cerr<<x<<std::endl;
+ std::cerr<<"difference is "<<xcopy<<std::endl;
+ }else
+ std::cout<<"Result is "<<calced_x<<std::endl;
+
+ return ret;
+}
+
+
+int test_invert_solve()
+{
+ int ret = 0;
+
+ using FM = Dune::FieldMatrix<double, 3, 3>;
+ using FV = Dune::FieldVector<double, 3>;
+
+ FM A_data = {{1, 5, 7}, {2, 14, 15}, {4, 40, 39}};
+ FM inv_data = {{-9.0 / 4, 85.0 / 24, -23.0 / 24},
+ {-3.0 / 4, 11.0 / 24, -1.0 / 24},
+ {1, -5.0 / 6, 1.0 / 6}};
+ FV b = {32, 75, 201};
+ FV x = {1, 2, 3};
+ ret += test_invert_solve<double, 3>(A_data, inv_data, x, b);
+
+ FM A_data0 = {{-0.5, 0, -0.25}, {0.5, 0, -0.25}, {0, 0.5, 0}};
+ FM inv_data0 = {{-1, 1, 0}, {0, 0, 2}, {-2, -2, 0}};
+ FV b0 = {32, 75, 201};
+ FV x0 = {43, 402, -214};
+ ret += test_invert_solve<double, 3>(A_data0, inv_data0, x0, b0);
+
+ FM A_data1 = {{0, 1, 0}, {1, 0, 0}, {0, 0, 1}};
+ FV b1 = {0, 1, 2};
+ FV x1 = {1, 0, 2};
+ ret += test_invert_solve<double, 3>(A_data1, A_data1, x1, b1);
+
+ FM A_data2 = {{3, 1, 6}, {2, 1, 3}, {1, 1, 1}};
+ FM inv_data2 = {{-2, 5, -3}, {1, -3, 3}, {1, -2, 1}};
+ FV b2 = {2, 7, 4};
+ FV x2 = {19, -7, -8};
+ ret += test_invert_solve<double, 3>(A_data2, inv_data2, x2, b2);
+
+ using FM6 = Dune::FieldMatrix<double, 6, 6>;
+ using FV6 = Dune::FieldVector<double, 6>;
+ using FM6f = Dune::FieldMatrix<float, 6, 6>;
+ using FV6f = Dune::FieldVector<float, 6>;
+ using FM6c = Dune::FieldMatrix<std::complex<double>, 6, 6>;
+ using FV6c = Dune::FieldVector<std::complex<double>, 6>;
+ using FM6cf = Dune::FieldMatrix<std::complex<float>, 6, 6>;
+ using FV6cf = Dune::FieldVector<std::complex<float>, 6>;
+ FM6 A_data3 = {{0.1756212892262638, 0.18004482126181995, -0.49348712464381461, 0.49938830949606494, -0.7073160963417815, 1.0595994834402057e-06},
+ {0.17562806606385517, 0.18005184462676252, -0.49354113600539418, 0.50059575375120657, 0.70689735319270453, -3.769499436967368e-07},
+ {0.17562307226079987, 0.1800466692525447, -0.49350050991711036, -0.5000065175076156, 0.00018887507812282846, -0.70710715811504954},
+ {0.17562308446070105, 0.18004668189625178, -0.49350060714612815, -0.50000869003275417, 0.00019031361405394119, 0.70710640425695015},
+ {-0.0072214111281474463, 0.93288324029450198, -0.11009998093332186, -1.7482015044681947e-06, -2.35420746900079e-06, -4.2380607559371285e-09},
+ {0.93625470097440933, -0.0077746247590777659, -0.11696151733678119, -1.8717676241478393e-06, -2.5225363177584535e-06, -4.5410877139483271e-09}};
+ FM6 inv_data3 = {{-0.069956619842954, -0.069956322880040, -0.069956501823745, -0.069956501289142, 0.063349638850509, 1.121064161778902},
+ {-0.066113473123754, -0.066113223084417, -0.066113362249636, -0.066113361799508, 1.123470950632021, 0.058271943290769},
+ {-0.555587502096003, -0.555615651279932, -0.555585807267011, -0.555585857939820, 0.432422844944552, 0.420211281044740},
+ { 0.499710573383257, 0.500274796075355, -0.500006831431901, -0.500007846623773, 0.000003909674199, 0.000003817686226},
+ {-0.707554041861306, 0.706659150542343, 0.000405628342406, 0.000407065756770, 0.000010628642550, 0.000010383891450},
+ { 0.000001450379141, 0.000000012708409, -0.707107586716496, 0.707105975654669, 0.000000019133995, 0.000000018693387}};
+ FV6 b3 = {1, 1, 1, 1, 1, 1};
+ FV6 x3 = {0.904587854793530, 0.917289473665475, -1.369740692593475, -0.000021581236636, -0.000061184685788, -0.000000110146895};
+ FM6f A_data3f, inv_data3f;
+ FM6c A_data3c, inv_data3c;
+ FM6cf A_data3cf, inv_data3cf;
+ std::copy(A_data3.begin(), A_data3.end(), A_data3f.begin());
+ std::copy(inv_data3.begin(), inv_data3.end(), inv_data3f.begin());
+ std::copy(A_data3.begin(), A_data3.end(), A_data3c.begin());
+ std::copy(inv_data3.begin(), inv_data3.end(), inv_data3c.begin());
+ std::copy(A_data3.begin(), A_data3.end(), A_data3cf.begin());
+ std::copy(inv_data3.begin(), inv_data3.end(), inv_data3cf.begin());
+ FV6f b3f = b3;
+ FV6f x3f = x3;
+ FV6c b3c = b3;
+ FV6c x3c = x3;
+ FV6cf b3cf = b3;
+ FV6cf x3cf = x3;
+#if HAVE_VC
+ using FM6vc = Dune::FieldMatrix< Vc::SimdArray<double, 8>, 6, 6>;
+ using FV6vc = Dune::FieldVector< Vc::SimdArray<double, 8>, 6>;
+ FM6vc A_data3vc, inv_data3vc;
+ std::copy(A_data3.begin(), A_data3.end(), A_data3vc.begin());
+ std::copy(inv_data3.begin(), inv_data3.end(), inv_data3vc.begin());
+ FV6vc b3vc = b3;
+ FV6vc x3vc = x3;
+ ret += test_invert_solve< Vc::SimdArray<double, 8>, 6>(A_data3vc, inv_data3vc, x3vc, b3vc);
+#endif
+ ret += test_invert_solve<double, 6>(A_data3, inv_data3, x3, b3);
+ ret += test_invert_solve<std::complex<double>, 6>(A_data3c, inv_data3c, x3c, b3c);
+ ret += test_invert_solve<std::complex<float>, 6>(A_data3cf, inv_data3cf, x3cf, b3cf);
+ ret += test_invert_solve<float, 6>(A_data3f, inv_data3f, x3f, b3f);
+
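+ // Symmetric positive definite tridiagonal matrix (1D Laplacian stencil); such
+ // a matrix can be factorised safely without pivoting, so doPivoting is false.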
+ FM A_data4 = {{2, -1, 0}, {-1, 2, -1}, {0, -1, 2}};
+ FM inv_data4 = {{0.75, 0.5, 0.25}, {0.5, 1, 0.5}, {0.25, 0.5, 0.75}};
+ FV b4 = {1, 2, 3};
+ FV x4 = {2.5, 4, 3.5};
+ ret += test_invert_solve<double, 3>(A_data4, inv_data4, x4, b4, false);
+ return ret;
+}
+
+template<class K, int n, int m, class X, class Y, class XT, class YT>
+void test_mult(FieldMatrix<K, n, m>& A,
+ X& v, Y& f, XT& vT, YT& fT)
+{
+ // test the various matrix-vector products
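+ // v/f match the shape of A while vT/fT have the shapes required by the
+ // transposed and conjugate-transposed products, so the calls also type-check
+ // for rectangular matrices and mixed scalar types.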
+ A.mv(v,f);
+ A.mtv(fT,vT);
+ A.umv(v,f);
+ A.umtv(fT,vT);
+ A.umhv(fT,vT);
+ A.mmv(v,f);
+ A.mmtv(fT,vT);
+ A.mmhv(fT,vT);
+ using S = typename FieldTraits<Y>::field_type;
+ using S2 = typename FieldTraits<XT>::field_type;
+ S scalar = (S)(0.5);
+ S2 scalar2 = (S2)(0.5);
+ A.usmv(scalar,v,f);
+ A.usmtv(scalar2,fT,vT);
+ A.usmhv(scalar2,fT,vT);
+}
+
+template<class K, class K2, class K3, int n, int m>
+void test_matrix()
+{
+ typedef typename FieldMatrix<K,n,m>::size_type size_type;
+
+ FieldMatrix<K,n,m> A;
+ FieldVector<K2,m> v;
+ FieldVector<K3,n> f;
+
+ // test constexpr size
+ static_assert(A.N() == n, "");
+ static_assert(A.M() == m, "");
+
+ // assign matrix
+ A=K();
+ // random access matrix
+ for (size_type i=0; i<n; i++)
+ for (size_type j=0; j<m; j++)
+ A[i][j] = i*j;
+ // iterator matrix
+ auto rit = A.begin();
+ for (; rit!=A.end(); ++rit)
+ {
+ rit.index();
+ auto cit = rit->begin();
+ for (; cit!=rit->end(); ++cit)
+ {
+ cit.index();
+ (*cit) *= 2;
+ }
+ }
+
+ // assign vector
+ f = 1;
+
+ // random access vector
+ for (size_type i=0; i<v.dim(); i++)
+ v[i] = i;
+ // iterator vector
+ auto it = v.begin();
+ auto end = v.end();
+ for (; it!=end; ++it)
+ {
+ it.index();
+ (*it) *= 2;
+ }
+ // reverse iterator vector
+ it = v.beforeEnd();
+ end = v.beforeBegin();
+ for (; it!=end; --it)
+ (*it) /= 2;
+ // find vector
+ for (size_type i=0; i<v.dim(); i++)
+ {
+ it = v.find(i);
+ (*it) += 1;
+ }
+
+ // matrix vector product
+ A.umv(v,f);
+ // check that mv and umv are doing the same thing
+ {
+ FieldVector<K3,n> res2(0);
+ FieldVector<K3,n> res1;
+
+ FieldVector<K2,m> b(1);
+
+ A.mv(b, res1);
+ A.umv(b, res2);
+
+ if( (res1 - res2).two_norm() > 1e-12 )
+ {
+ DUNE_THROW(FMatrixError,"mv and umv are not doing the same!");
+ }
+ }
+
+ {
+ FieldVector<K2,m> v0 (v);
+ FieldVector<K3,n> f0 (f);
+ FieldVector<K3,m> vT (0);
+ FieldVector<K2,n> fT (0);
+ test_mult(A, v0, f0, vT, fT);
+ }
+
+ // {
+ // std::vector<K2> v1( m ) ;
+ // std::vector<K3> f1( n, 1 ) ;
+ // // random access vector
+ // for (size_type i=0; i<v1.size(); i++) v1[i] = i;
+ // test_mult(A, v1, f1 );
+ // }
+ // {
+ // K2 v2[ m ];
+ // K3 f2[ n ];
+ // // random access vector
+ // for (size_type i=0; i<m; ++i) v2[i] = i;
+ // for (size_type i=0; i<n; ++i) f2[i] = 1;
+ // test_mult(A, v2, f2 );
+ // }
+
+ // Test the different matrix norms
+ assert( A.frobenius_norm() >= 0 );
+ assert( A.frobenius_norm2() >= 0 );
+ assert( A.infinity_norm() >= 0 );
+ assert( A.infinity_norm_real() >= 0);
+
+ // print matrix
+ std::cout << A << std::endl;
+ // print vector
+ std::cout << f << std::endl;
+
+ A[0][0] += 5; // Make matrix non-zero
+ {
+ // Test that operator= and operator-= work before we can test anything else
+ using FM = FieldMatrix<K,n,m>;
+ FM A0 = A;
+ {
+ if (A0.infinity_norm() < 1e-12)
+ DUNE_THROW(FMatrixError, "Assignment had no effect!");
+ }
+ A0 -= A;
+ {
+ if (A0.infinity_norm() > 1e-12)
+ DUNE_THROW(FMatrixError, "Operator-= had no effect!");
+ }
+ FM A1 = A; // A1 == A
+ FM A2 = (A1 *= 2); // A1 == A2 == 2*A
+ {
+ FM tmp = A1; tmp -= A;
+ if (tmp.infinity_norm() < 1e-12)
+ DUNE_THROW(FMatrixError,"Operator*= had no effect!");
+ }
+ {
+ FM tmp = A2; tmp -= A1;
+ if (tmp.infinity_norm() > 1e-12)
+ DUNE_THROW(FMatrixError, "Return value of Operator*= incorrect!");
+ }
+ [[maybe_unused]] FM A3 = (A2 *= 3); // A2 == A3 == 6*A
+ FM A4 = (A2 /= 2); // A2 == A4 == 3*A;
+ FM A5 = A;
+ A5 *= 3; // A5 == 3*A
+ {
+ FM tmp = A2; tmp -= A5;
+ if (tmp.infinity_norm() > 1e-12)
+ DUNE_THROW(FMatrixError, "Operator/= had no effect!");
+ }
+ {
+ FM tmp = A4; tmp -= A5;
+ if (tmp.infinity_norm() > 1e-12)
+ DUNE_THROW(FMatrixError, "Return value of Operator/= incorrect!");
+ }
+
+ FM A6 = A;
+ FM A7 = (A6 += A); // A6 == A7 == 2*A
+ {
+ FM tmp = A1; tmp -= A6;
+ if (tmp.infinity_norm() > 1e-12)
+ DUNE_THROW(FMatrixError, "Operator+= had no effect!");
+ }
+ {
+ FM tmp = A1; tmp -= A7;
+ if (tmp.infinity_norm() > 1e-12)
+ DUNE_THROW(FMatrixError, "Return value of Operator+= incorrect!");
+ }
+
+ FM A8 = A2; // A8 == A2 == 3*A
+ FM A9 = (A8 -= A); // A9 == A8 == 2*A;
+ {
+ FM tmp = A8; tmp -= A1;
+ if (tmp.infinity_norm() > 1e-12)
+ DUNE_THROW(FMatrixError, "Operator-= had no effect!");
+ }
+ {
+ FM tmp = A9; tmp -= A1;
+ if (tmp.infinity_norm() > 1e-12)
+ DUNE_THROW(FMatrixError, "Return value of Operator-= incorrect!");
+ }
+ FM A10 = A;
+ FM A11 = A10.axpy(2, A); // A11 = 3*A;
+ {
+ FM tmp = A10; tmp -= A2;
+ if (tmp.infinity_norm() > 1e-12)
+ DUNE_THROW(FMatrixError, "axpy() had no effect!");
+ }
+ {
+ FM tmp = A10; tmp -= A11;
+ if (tmp.infinity_norm() > 1e-12)
+ DUNE_THROW(FMatrixError, "Return value of axpy() incorrect!");
+ }
+
+ // Scalar * Matrix and Matrix * Scalar
+ {
+ typename FM::field_type scalar = 3;
+ FM sA = scalar * A;
+ FM aS = A * scalar;
+ FM ref = A;
+ ref *= scalar;
+
+ if ((sA-ref).infinity_norm() > 1e-12)
+ DUNE_THROW(FMatrixError, "Return value of operator*(scalar,matrix) incorrect!");
+
+ if ((aS-ref).infinity_norm() > 1e-12)
+ DUNE_THROW(FMatrixError, "Return value of operator*(matrix,scalar) incorrect!");
+ }
+
+ // Matrix / Scalar
+ {
+ typename FM::field_type scalar = 3;
+ FM aS = A / scalar;
+ FM ref = A;
+ ref /= scalar;
+
+ if ((aS-ref).infinity_norm() > 1e-12)
+ DUNE_THROW(FMatrixError, "Return value of operator/(matrix,scalar) incorrect!");
+ }
+
+ // Matrix + Matrix
+ {
+ FM twiceA = A + A;
+ FM ref = typename FM::field_type(2)*A;
+
+ if ((twiceA-ref).infinity_norm() > 1e-12)
+ DUNE_THROW(FMatrixError, "Return value of operator+(matrix,matrix) incorrect!");
+ }
+
+ // Matrix - Matrix
+ {
+ FM zero = A - A;
+
+ if (zero.infinity_norm() > 1e-12)
+ DUNE_THROW(FMatrixError, "Return value of operator-(matrix,matrix) incorrect!");
+ }
+
+ // -Matrix
+ {
+ FM neg = -A;
+ FM ref = typename FM::field_type(-1)*A;
+
+ if ((neg-ref).infinity_norm() > 1e-12)
+ DUNE_THROW(FMatrixError, "Return value of operator-(matrix) incorrect!");
+ }
+
+ // Matrix * Matrix
+ {
+ auto transposed = [](const FM& A)
+ {
+ FieldMatrix<typename FM::field_type,FM::cols,FM::rows> AT;
+ for (int i=0; i<AT.rows; i++)
+ for (int j=0; j<AT.cols; j++)
+ AT[i][j] = A[j][i];
+
+ return AT;
+ };
+
+ [[maybe_unused]] auto product = transposed(A) * A;
+ }
+
+ }
+ {
+ using std::abs;
+
+ FieldMatrix<K,n,m> A3 = A;
+ A3 *= 3;
+
+ FieldMatrix<K,n,m> B = A;
+ B.axpy( K( 2 ), B );
+ B -= A3;
+ if (abs(B.infinity_norm()) > 1e-12)
+ DUNE_THROW(FMatrixError,"Axpy test failed!");
+ }
+ {
+ using std::abs;
+
+ FieldMatrix<K,n,n+1> A2;
+ for(size_type i=0; i<A2.N(); ++i)
+ for(size_type j=0; j<A2.M(); ++j)
+ A2[i][j] = i;
+ const FieldMatrix<K,n,n+1>& Aref = A2;
+
+
+ FieldMatrix<K,n+1,n+1> B;
+ for(size_type i=0; i<B.N(); ++i)
+ for(size_type j=0; j<B.M(); ++j)
+ B[i][j] = i;
+ const FieldMatrix<K,n+1,n+1>& Bref = B;
+
+ FieldMatrix<K,n,n> C;
+ for(size_type i=0; i<C.N(); ++i)
+ for(size_type j=0; j<C.M(); ++j)
+ C[i][j] = i;
+ const FieldMatrix<K,n,n>& Cref = C;
+
+ FieldMatrix<K,n,n+1> AB = Aref.rightmultiplyany(B);
+ for(size_type i=0; i<AB.N(); ++i)
+ for(size_type j=0; j<AB.M(); ++j)
+ if (abs(AB[i][j] - K(i*n*(n+1)/2)) > 1e-10)
+ DUNE_THROW(FMatrixError,"Rightmultiplyany test failed!");
+
+ FieldMatrix<K,n,n+1> AB2 = A2;
+ AB2.rightmultiply(B);
+ AB2 -= AB;
+ if (abs(AB2.infinity_norm()) > 1e-10)
+ DUNE_THROW(FMatrixError,"Rightmultiply test failed!");
+
+ FieldMatrix<K,n,n+1> AB3 = Bref.leftmultiplyany(A2);
+ AB3 -= AB;
+ if (abs(AB3.infinity_norm()) > 1e-10)
+ DUNE_THROW(FMatrixError,"Leftmultiplyany test failed!");
+
+ FieldMatrix<K,n,n+1> CA = Aref.leftmultiplyany(C);
+ for(size_type i=0; i<CA.N(); ++i)
+ for(size_type j=0; j<CA.M(); ++j)
+ if (abs(CA[i][j] - K(i*n*(n-1)/2)) > 1e-10)
+ DUNE_THROW(FMatrixError,"Leftmultiplyany test failed!");
+
+ FieldMatrix<K,n,n+1> CA2 = A2;
+ CA2.leftmultiply(C);
+ CA2 -= CA;
+ if (abs(CA2.infinity_norm()) > 1e-10)
+ DUNE_THROW(FMatrixError,"Leftmultiply test failed!");
+
+ FieldMatrix<K,n,n+1> CA3 = Cref.rightmultiplyany(A2);
+ CA3 -= CA;
+ if (abs(CA3.infinity_norm()) > 1e-10)
+ DUNE_THROW(FMatrixError,"Rightmultiplyany test failed!");
+ }
+}
+
+template<class T>
+int test_determinant()
+{
+ using std::abs;
+
+ int ret = 0;
+
+ FieldMatrix<T, 4, 4> B;
+ B[0][0] = 3.0; B[0][1] = 0.0; B[0][2] = 1.0; B[0][3] = 0.0;
+ B[1][0] = -1.0; B[1][1] = 3.0; B[1][2] = 0.0; B[1][3] = 0.0;
+ B[2][0] = -3.0; B[2][1] = 0.0; B[2][2] = -1.0; B[2][3] = 2.0;
+ B[3][0] = 0.0; B[3][1] = -1.0; B[3][2] = 0.0; B[3][3] = 1.0;
+ if (Simd::anyTrue(abs(B.determinant() + 2.0) > 1e-12))
+ {
+ std::cerr << "Determinant 1 test failed (" << Dune::className<T>() << ")"
+ << std::endl;
+ std::cerr << "Determinant 1 is " << B.determinant(true) << ", expected 2.0"
+ << std::endl;
+ ++ret;
+ }
+
+ B[0][0] = 3.0; B[0][1] = 0.0; B[0][2] = 1.0; B[0][3] = 0.0;
+ B[1][0] = -1.0; B[1][1] = 3.0; B[1][2] = 0.0; B[1][3] = 0.0;
+ B[2][0] = -3.0; B[2][1] = 0.0; B[2][2] = -1.0; B[2][3] = 2.0;
+ B[3][0] = -1.0; B[3][1] = 3.0; B[3][2] = 0.0; B[3][3] = 2.0;
+ if (Simd::anyTrue(B.determinant(false) != 0.0))
+ {
+ std::cerr << "Determinant 2 test failed (" << Dune::className<T>() << ")"
+ << std::endl;
+ std::cerr << "Determinant 2 is " << B.determinant(false) << ", expected 0.0"
+ << std::endl;
+ ++ret;
+ }
+
+ return ret;
+}
+
+template<class ft>
+struct ScalarOperatorTest
+{
+ ScalarOperatorTest()
+ {
+ ft a = 1;
+ ft c = 2;
+ FieldMatrix<ft,1,1> v(2);
+ FieldMatrix<ft,1,1> w(2);
+ [[maybe_unused]] bool b;
+
+ std::cout << __func__ << "\t ( " << className(v) << " )" << std::endl;
+
+ a = a * c;
+ a = a + c;
+ a = a / c;
+ a = a - c;
+
+ v = a;
+ v = w = v;
+ a = v;
+
+ a = v + a;
+ a = v - a;
+ a = v * a;
+ a = v / a;
+
+ v = v + a;
+ v = v - a;
+ v = v * a;
+ v = v / a;
+
+ a = a + v;
+ a = a - v;
+ a = a * v;
+ a = a / v;
+
+ v = a + v;
+ v = a - v;
+ v = a * v;
+ v = a / v;
+
+ v -= w;
+ v -= a;
+ v += w;
+ v += a;
+ v *= a;
+ v /= a;
+
+ b = (v == a);
+ b = (v != a);
+ b = (a == v);
+ b = (a != v);
+
+ }
+};
+
+template<typename ft>
+void test_ev()
+{
+ // rosser test matrix
+
+ /*
+ This matrix was a challenge for many matrix eigenvalue
+ algorithms. But the Francis QR algorithm, as perfected by
+ Wilkinson and implemented in EISPACK, has no trouble with it. The
+ matrix is 8-by-8 with integer elements. It has:
+
+ * A double eigenvalue
+ * Three nearly equal eigenvalues
+ * Dominant eigenvalues of opposite sign
+ * A zero eigenvalue
+ * A small, nonzero eigenvalue
+
+ */
+ Dune::FieldMatrix<ft,8,8> A = {
+ { 611, 196, -192, 407, -8, -52, -49, 29 },
+ { 196, 899, 113, -192, -71, -43, -8, -44 },
+ { -192, 113, 899, 196, 61, 49, 8, 52 },
+ { 407, -192, 196, 611, 8, 44, 59, -23 },
+ { -8, -71, 61, 8, 411, -599, 208, 208 },
+ { -52, -43, 49, 44, -599, 411, 208, 208 },
+ { -49, -8, 8, 59, 208, 208, 99, -911 },
+ { 29, -44, 52, -23, 208, 208, -911, 99}
+ };
+
+ // compute eigenvalues
+ Dune::FieldVector<ft,8> eig;
+ Dune::FMatrixHelp::eigenValues(A, eig);
+
+ // test results
+ Dune::FieldVector<ft,8> ref;
+ /*
+ reference solution computed with octave 3.2
+
+ > format long e
+ > eig(rosser())
+
+ */
+ ref = { -1.02004901843000e+03,
+ -4.14362871168386e-14,
+ 9.80486407214362e-02,
+ 1.00000000000000e+03,
+ 1.00000000000000e+03,
+ 1.01990195135928e+03,
+ 1.02000000000000e+03,
+ 1.02004901843000e+03 };
+
+ if( (ref - eig).two_norm() > 1e-10 )
+ {
+ DUNE_THROW(FMatrixError,"error computing eigenvalues");
+ }
+
+ std::cout << "Eigenvalues of Rosser matrix: " << eig << std::endl;
+}
+
+template< class K, int n >
+void test_invert ()
+{
+ Dune::FieldMatrix< K, n, n > A( 1e-15 );
+ for( int i = 0; i < n; ++i )
+ A[ i ][ i ] = K( 1 );
+ A.invert();
+}
+
+template <class M>
+void checkNormNAN(M const &v, int line) {
+ if (!std::isnan(v.frobenius_norm())) {
+ std::cerr << "error: norm not NaN: frobenius_norm() on line "
+ << line << " (type: " << Dune::className(v[0]) << ")"
+ << std::endl;
+ std::exit(-1);
+ }
+ if (!std::isnan(v.infinity_norm())) {
+ std::cerr << "error: norm not NaN: infinity_norm() on line "
+ << line << " (type: " << Dune::className(v[0]) << ")"
+ << std::endl;
+ std::exit(-1);
+ }
+}
+
+// Make sure that matrices with NaN entries have norm NaN.
+// See also bug flyspray/FS#1147
+template <typename T>
+void
+test_nan(T const &mynan)
+{
+ T const n(0);
+ {
+ Dune::FieldMatrix<T, 2, 2> m = {
+ { mynan, mynan },
+ { mynan, mynan }
+ };
+ checkNormNAN(m, __LINE__);
+ }
+ {
+ Dune::FieldMatrix<T, 2, 2> m = {
+ { mynan, n },
+ { n, n }
+ };
+ checkNormNAN(m, __LINE__);
+ }
+ {
+ Dune::FieldMatrix<T, 2, 2> m = {
+ { n, mynan },
+ { n, n }
+ };
+ checkNormNAN(m, __LINE__);
+ }
+ {
+ Dune::FieldMatrix<T, 2, 2> m = {
+ { n, n },
+ { mynan, n }
+ };
+ checkNormNAN(m, __LINE__);
+ }
+ {
+ Dune::FieldMatrix<T, 2, 2> m = {
+ { n, n },
+ { n, mynan }
+ };
+ checkNormNAN(m, __LINE__);
+ }
+}
+
+// The computation of infinity_norm_real() was flawed from r6819 on
+// until r6915.
+void
+test_infinity_norms()
+{
+ using std::abs;
+
+ std::complex<double> threefour(3.0, -4.0);
+ std::complex<double> eightsix(8.0, -6.0);
+
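+ // |3-4i| = 5 and |8-6i| = 10 enter infinity_norm(), whereas
+ // infinity_norm_real() uses |re|+|im|, i.e. 7 and 14 per entry.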
+ Dune::FieldMatrix<std::complex<double>, 2, 2> m;
+ m[0] = threefour;
+ m[1] = eightsix;
+ assert(abs(m.infinity_norm() -20.0) < 1e-10); // max(5+5, 10+10)
+ assert(abs(m.infinity_norm_real()-28.0) < 1e-10); // max(7+7, 14+14)
+}
+
+
+template< class K, class K2, int rows, int cols >
+void test_interface()
+{
+ typedef CheckMatrixInterface::UseFieldVector< K2, rows, cols > Traits;
+ typedef Dune::FieldMatrix< K, rows, cols > FMatrix;
+
+#if __GNUC__ != 5 || defined(__clang__)
+ static_assert(
+ !std::is_trivially_copyable<K>::value || std::is_trivially_copyable<FMatrix>::value,
+ "FieldMatrix<T, ...> must be trivally copyable type when T is trivial type"
+ );
+#endif
+ static_assert(
+ std::is_standard_layout<FMatrix>::value,
+ "FieldMatrix<...> must be a standard layout type"
+ );
+
+ FMatrix m( 1 );
+ checkMatrixInterface< FMatrix >( m );
+ checkMatrixInterface< FMatrix, Traits >( m );
+}
+
+void test_initialisation()
+{
+ [[maybe_unused]] Dune::FieldMatrix<int, 2, 2> const A = {
+ { 1, 2 },
+ { 3, 4 }
+ };
+
+ assert(A[0][0] == 1);
+ assert(A[0][1] == 2);
+ assert(A[1][0] == 3);
+ assert(A[1][1] == 4);
+}
+
+int main()
+{
+ try {
+ int errors = 0; // counts errors
+
+ static_assert(
+ std::is_same< Dune::FieldMatrix<double, 3, 3>, Dune::FieldMatrix<double, 3> >::value,
+ "default parameter for square matrices"
+ );
+
+ {
+ double nan = std::nan("");
+ test_nan(nan);
+ }
+ {
+ std::complex<double> nan( std::nan(""), 17 );
+ test_nan(nan);
+ }
+ test_infinity_norms();
+ test_initialisation();
+
+ // test 1 x 1 matrices
+ test_interface<float, float, 1, 1>();
+ test_matrix<float, float, float, 1, 1>();
+ ScalarOperatorTest<float>();
+ test_matrix<double, double, double, 1, 1>();
+ ScalarOperatorTest<double>();
+#if HAVE_QUADMATH
+ test_matrix<Dune::Float128, Dune::Float128, Dune::Float128, 1, 1>();
+ ScalarOperatorTest<Dune::Float128>();
+#endif
+ // test n x m matrices
+ test_interface<int, int, 10, 5>();
+ test_matrix<int, int, int, 10, 5>();
+ test_matrix<double, double, double, 5, 10>();
+ test_interface<double, double, 5, 10>();
+#if HAVE_QUADMATH
+ test_matrix<Dune::Float128, Dune::Float128, Dune::Float128, 5, 10>();
+ test_interface<Dune::Float128, Dune::Float128, 5, 10>();
+#endif
+ // mixed precision
+ test_interface<float, float, 5, 10>();
+ test_matrix<float, double, float, 5, 10>();
+#if HAVE_QUADMATH
+ test_matrix<float, double, Dune::Float128, 5, 10>();
+#endif
+ // test complex matrices
+ test_matrix<std::complex<float>, std::complex<float>, std::complex<float>, 1, 1>();
+ test_matrix<std::complex<double>, std::complex<double>, std::complex<double>, 5, 10>();
+ // test complex/real matrices mixed case
+ test_matrix<float, std::complex<float>, std::complex<float>, 1, 1>();
+ test_matrix<std::complex<float>, float, std::complex<float>, 1, 1>();
+#if HAVE_LAPACK
+ // test eigenvalue computation
+ test_ev<double>();
+#endif
+ // test high level methods
+ errors += test_determinant< double >();
+#if HAVE_VC
+ errors += test_determinant< Vc::SimdArray<double, 8> >();
+#endif
+
+ // test determinant with LoopSIMD types
+ errors += test_determinant< Dune::LoopSIMD<double, 8> >();
+
+ test_invert< float, 34 >();
+ test_invert< double, 34 >();
+ test_invert< std::complex< long double >, 2 >();
+ test_invert< std::complex< float >, 2 >();
+ errors += test_invert_solve();
+
+ { // Test whether multiplying one-column matrices by scalars works
+ FieldMatrix<double,3,1> A = {1,2,3};
+ double v = 0;
+ FieldVector<double,3> f = {2,3,4};
+ double vT = 0;
+ FieldVector<double,3> fT = {3,4,5};
+ test_mult(A, v, f, vT, fT);
+ }
+
+ { // Test whether the result of multiplying a one-row matrix can be a scalar
+ FieldMatrix<double,1,3> A = {{1,2,3}};
+ FieldVector<double,3> v = {2,3,4};
+ double f = 0;
+ FieldVector<double,3> vT = {3,4,5};
+ double fT = 0;
+ test_mult(A, v, f, vT, fT);
+ }
+
+ { // Test multiplication of 1x1 matrix with scalars
+ FieldMatrix<double,1,1> A = {42};
+ double v = 0;
+ double f = 2;
+ double vT = 0;
+ double fT = 5;
+ test_mult(A, v, f, vT, fT);
+ }
+
+ return (errors > 0 ? 1 : 0); // convert error count to unix exit status
+ }
+ catch (Dune::Exception & e)
+ {
+ std::cerr << "Exception: " << e << std::endl;
+ return 1;
+ }
+}
--- /dev/null
+#include "config.h"
+
+#include <type_traits>
+
+
+#include <dune/common/deprecated.hh>
+#define DUNE_FUNCTION_HH_SILENCE_DEPRECATION
+#include <dune/common/function.hh>
+#include <dune/common/test/testsuite.hh>
+
+int main()
+{
+ Dune::TestSuite t;
+
+ DUNE_NO_DEPRECATED_BEGIN
+ {
+ auto f = Dune::makeVirtualFunction<int, long>(
+ [](int x) -> long { return x*x; });
+
+ static_assert(
+ std::is_base_of< Dune::VirtualFunction<int, long>, decltype(f) >::value,
+ "makeVirtualFunction() must return type derived from VirtualFunction");
+
+ long y;
+ f.evaluate(2, y);
+ t.check(y == 4);
+ }
+
+ {
+ auto f1 = [](int x) -> long { return x*x; };
+ auto f = Dune::makeVirtualFunction<int, long>(f1);
+
+ static_assert(
+ std::is_base_of< Dune::VirtualFunction<int, long>, decltype(f) >::value,
+ "makeVirtualFunction() must return type derived from VirtualFunction");
+
+ long y;
+ f.evaluate(2, y);
+ t.check(y == 4);
+ }
+ DUNE_NO_DEPRECATED_END
+
+ return t.exit();
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <iostream>
+#include <algorithm>
+
+#include <dune/common/densevector.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/exceptions.hh>
+
+//! @file
+/**
+ * This test checks for a regression where `std::is_assignable` would return
+ * `true` for certain assignments that could not actually be instantiated.
+ * The fix makes `std::is_assignable` report `false` for these cases, and
+ * that is what is checked here.
+ */
+
+template<class Component, std::size_t Dim >
+class MyVector;
+
+namespace Dune
+{
+ template<class Component, std::size_t Dim>
+ struct DenseMatVecTraits< MyVector<Component, Dim> >
+ {
+ using derived_type = MyVector<Component, Dim>;
+ using value_type = Component;
+ using size_type = std::size_t;
+ };
+
+ template<class Component, std::size_t Dim, int Size>
+ struct IsFieldVectorSizeCorrect<MyVector<Component, Dim>, Size>
+ : std::integral_constant<bool, Dim == Size>
+ {};
+}
+
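+// Minimal DenseVector implementation used only for the assignability checks
+// in main(): it stores a single component and returns it for every index,
+// which is just enough to model the CRTP interface required by
+// Dune::DenseVector.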
+template<class Component, std::size_t Dim >
+class MyVector : public Dune::DenseVector< MyVector<Component, Dim> >
+{
+ public:
+ static constexpr std::size_t size () { return Dim; }
+
+ Component& operator[] ( std::size_t i ) { return data_; }
+ const Component& operator[] ( std::size_t i ) const { return data_; }
+ protected:
+ Component data_;
+};
+
+int main()
+{
+ try
+ {
+ // Pure 1d case. Here OuterMV is assignable to MiddleFV because the
+ // 1d FieldVector implements a type cast to the underlying
+ // field. This is expected behaviour.
+ {
+ using InnerFV = Dune::FieldVector<double, 1>;
+ using MiddleFV = Dune::FieldVector<InnerFV, 1>;
+ using OuterFV = Dune::FieldVector<MiddleFV, 1>;
+
+ using MiddleMV = MyVector<InnerFV, 1>;
+ using OuterMV = MyVector<MiddleMV, 1>;
+
+ MiddleFV mfv;
+ OuterMV mv;
+ OuterFV fv;
+
+ static_assert(std::is_convertible<OuterMV, OuterFV>::value,
+ "DenseVectors should be convertible.");
+ fv = mv;
+
+ static_assert(std::is_assignable<MiddleFV&, OuterMV>::value,
+ "Reduced assignability detected.");
+ mfv = mv;
+ }
+
+ // The following would trigger a problem in the DenseVector
+ // operator=() which was cured by first checking whether the
+ // value_types are assignable.
+ {
+ using InnerFV = Dune::FieldVector<double, 2>;
+ using MiddleFV = Dune::FieldVector<InnerFV, 1>;
+ using OuterFV = Dune::FieldVector<MiddleFV, 1>;
+
+ using MiddleMV = MyVector<InnerFV, 1>;
+ using OuterMV = MyVector<MiddleMV, 1>;
+
+ // MiddleFV mfv;
+ OuterMV mv;
+ OuterFV fv;
+
+ static_assert(std::is_convertible<OuterMV, OuterFV>::value,
+ "DenseVectors should be convertible.");
+ fv = mv;
+
+ // before the fix, `is_assignable` returned `true`,
+ static_assert(!std::is_assignable<MiddleFV&, OuterMV>::value,
+ "Inconsistent assignability detected.");
+ // mfv = mv; // <- but this assignment failed instantiation
+ }
+
+ {
+ using InnerFV = Dune::FieldMatrix<double, 2, 2>;
+ using MiddleFV = Dune::FieldVector<InnerFV, 1>;
+ using OuterFV = Dune::FieldVector<MiddleFV, 1>;
+
+ using MiddleMV = MyVector<InnerFV, 1>;
+ using OuterMV = MyVector<MiddleMV, 1>;
+
+ // MiddleFV mfv;
+ OuterMV mv;
+ OuterFV fv;
+
+ static_assert(std::is_assignable<OuterFV, OuterMV>::value,
+ "DenseVectors should be assignable.");
+ fv = mv;
+
+ // before the fix, `is_assignable` returned `true`,
+ static_assert(!std::is_assignable<MiddleFV&, OuterMV>::value,
+ "Inconsistent assignability detected.");
+ // mfv = mv; // <- but this assignment failed instantiation
+ }
+ return 0;
+ } catch (Dune::Exception& e) {
+ std::cerr << e << std::endl;
+ return 1;
+ } catch (...) {
+ std::cerr << "Generic exception!" << std::endl;
+ return 2;
+ }
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <complex>
+#include <iostream>
+#include <limits>
+#include <memory>
+#include <typeinfo>
+#include <type_traits>
+
+#include <dune/common/classname.hh>
+#include <dune/common/exceptions.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/gmpfield.hh>
+#include <dune/common/quadmath.hh>
+#include <dune/common/typetraits.hh>
+
+struct FVectorTestException : Dune::Exception {};
+
+#define FVECTORTEST_ASSERT(EXPR) \
+ if(!(EXPR)) { \
+ DUNE_THROW(FVectorTestException, \
+ "Test assertion " << #EXPR << " failed"); \
+ } \
+ static_assert(true, "enforce terminating ;")
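+// Usage note: the macro is invoked like an ordinary statement, e.g.
+//   FVECTORTEST_ASSERT(v.size() == d);
+// The trailing static_assert(true, ...) has no effect of its own but forces
+// the caller to terminate the invocation with a semicolon.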
+
+
+using Dune::FieldVector;
+using std::complex;
+
+// Tests that can be run without the construction of complex<rt>
+template<class ft, class rt, int d>
+struct FieldVectorMainTestCommons
+{
+ FieldVectorMainTestCommons() {
+#if __GNUC__ != 5 || defined(__clang__)
+ static_assert(
+ !std::is_trivially_copyable<ft>::value || std::is_trivially_copyable< FieldVector<ft, d> >::value,
+ "FieldVector<T, ...> must be a trivially copyable type when T is a trivial type"
+ );
+#endif
+ static_assert(
+ std::is_standard_layout< FieldVector<ft, d> >::value,
+ "FieldVector<...> must be a standard layout type"
+ );
+
+ ft a = 1;
+ FieldVector<ft,d> v(1);
+ FieldVector<ft,d> w(2);
+ FieldVector<ft,d> z(2);
+ const FieldVector<ft,d> x(z);
+ if (x.size()>0)
+ a = x[0];
+ [[maybe_unused]] bool b;
+ [[maybe_unused]] rt n;
+
+ std::cout << __func__ << "\t ( " << className(v) << " )" << std::endl;
+
+ // test exported types
+ static_assert(
+ std::is_same<ft,typename FieldVector<ft,d>::value_type>::value,
+ "FieldVector::value_type is not the correct type"
+ );
+
+ // test traits
+ static_assert(
+ ( std::is_same< typename Dune::FieldTraits<
+ FieldVector<ft,d> >::field_type, ft >::value ),
+ "FieldTraits<FieldVector> yields wrong field_type"
+ );
+ static_assert(
+ ( std::is_same< typename Dune::FieldTraits<ft>::real_type, rt >::value ),
+ "FieldTraits<field_type> yields wrong real_type"
+ );
+ static_assert(
+ ( std::is_same< typename Dune::FieldTraits<
+ FieldVector<ft,d> >::real_type, rt >::value ),
+ "FieldTraits<FieldVector> yields wrong real_type"
+ );
+
+ // Test whether the norm methods compile
+ n = (w+v).two_norm();
+ n = (w+v).two_norm2();
+ n = (w+v).one_norm();
+ n = (w+v).one_norm_real();
+ n = (w+v).infinity_norm();
+ n = (w+v).infinity_norm_real();
+
+ // test op(vec,vec)
+ z = v + w;
+ z = v - w;
+ [[maybe_unused]] FieldVector<ft,d> z2 = v + w;
+ w -= v;
+ w += v;
+
+ // test op(vec,scalar)
+ w +=a;
+ w -= a;
+ w *= a;
+ w /= a;
+ w = a * v;
+ w = v * a;
+ w = v / a;
+
+ // Negation
+ -v;
+
+ // test scalar product, axpy
+ a = v * w;
+ a = v.dot(w);
+ z = v.axpy(a,w);
+
+ // test comparison
+ b = (w != v);
+ b = (w == v);
+
+ // test istream operator
+ std::stringstream s;
+ for (int i=0; i<d; i++)
+ {
+ s << i << " ";
+ v[i] = i;
+ }
+ s >> w;
+ FVECTORTEST_ASSERT(v == w);
+
+ // test container methods
+ typename FieldVector<ft,d>::size_type size = FieldVector<ft,d>::dimension;
+ FVECTORTEST_ASSERT(size == w.size());
+
+ if (w.size() > 0) {
+ FVECTORTEST_ASSERT(!w.empty());
+ FVECTORTEST_ASSERT(std::addressof(w[0]) == std::addressof(w.front()));
+ FVECTORTEST_ASSERT(std::addressof(w[0]) == w.data());
+ FVECTORTEST_ASSERT(std::addressof(w[d-1]) == std::addressof(w.back()));
+ }
+ }
+};
+
+// Additional tests for floating point types, for which complex<rt> will work
+template<class ft, class rt, int d,
+ bool floating_point = std::is_floating_point<rt>::value>
+struct FieldVectorMainTest
+ : FieldVectorMainTestCommons<ft,rt,d>
+{
+ FieldVectorMainTest()
+ : FieldVectorMainTestCommons<ft,rt,d>()
+ {
+ ft a = 1;
+ FieldVector<ft,d> v(1);
+ FieldVector<ft,d> z(2);
+ const FieldVector<ft,d> x(z);
+
+ // assignment to vector of complex
+ FieldVector< std::complex<rt> ,d> cv = v;
+ cv = a;
+ [[maybe_unused]] const FieldVector< std::complex<rt> ,d> ccv = x;
+ }
+};
+
+template<class ft, class rt, int d>
+struct FieldVectorMainTest<ft,rt,d,false>
+ : FieldVectorMainTestCommons<ft,rt,d>
+{
+ FieldVectorMainTest()
+ : FieldVectorMainTestCommons<ft,rt,d>()
+ {}
+};
+
+template<class ft, class testft=ft>
+struct ScalarOperatorTest
+{
+ ScalarOperatorTest()
+ {
+ // testft has to be initializable from an int
+ testft a = 1;
+ testft c = 2;
+ FieldVector<ft,1> v(2);
+ FieldVector<ft,1> w(2);
+ [[maybe_unused]] bool b;
+
+ std::cout << __func__ << "\t ( " << className(v) << " )" << std::endl;
+
+ a = a * c;
+ a = a + c;
+ a = a / c;
+ a = a - c;
+
+ v = a;
+ v = w = v;
+ a = v;
+
+ a = v + a;
+ a = v - a;
+ a = v * a;
+ a += 1; // make sure a!=0
+ a = v / a;
+
+ v = v + a;
+ v = v - a;
+ v = v * a;
+ a += 1; // make sure a!=0
+ v = v / a;
+
+ a = a + v;
+ a = a - v;
+ a = a * v;
+ v += 1; // make sure v!=0
+ a = a / v;
+
+ v = a + v;
+ v = a - v;
+ v = a * v;
+ v += 1; // make sure v!=0
+ v = a / v;
+
+ v -= w;
+ v -= a;
+ v += w;
+ v += a;
+ v *= a;
+ a += 1; // make sure a!=0
+ v /= a;
+
+ b = (v == a);
+ b = (v != a);
+ b = (a == v);
+ b = (a != v);
+
+ }
+};
+
+// scalar ordering doesn't work for complex numbers
+template<class ft>
+struct ScalarOrderingTest
+{
+ ScalarOrderingTest()
+ {
+ ft a = 1;
+ ft c = 2;
+ FieldVector<ft,1> v(2);
+ FieldVector<ft,1> w(2);
+ [[maybe_unused]] bool b;
+
+ std::cout << __func__ << "\t ( " << className(v) << " )" << std::endl;
+
+ b = (a < c);
+ b = (a <= c);
+ b = (a >= c);
+ b = (a > c);
+
+ b = (v == a);
+ b = (v != a);
+ b = (a == v);
+ b = (a != v);
+
+ b = (v < a);
+ b = (v <= a);
+ b = (v >= a);
+ b = (v > a);
+
+ b = (v < w);
+ b = (v <= w);
+ b = (v >= w);
+ b = (v > w);
+
+ b = (a < w);
+ b = (a <= w);
+ b = (a >= w);
+ b = (a > w);
+ }
+};
+
+template<typename T>
+struct Epsilon
+{
+ static T value() { return T(1e-6); }
+};
+
+template<>
+struct Epsilon<int>
+{
+ static int value() { return 0; }
+};
+
+// Test the dot products: dot(a,b) conjugates its first argument, dotT(a,b) conjugates nothing
+template <class rt, int d,
+ bool floating_point = std::is_floating_point<rt>::value>
+struct DotProductTest
+{
+ DotProductTest() {
+ typedef std::complex<rt> ct;
+ [[maybe_unused]] const rt myEps = Epsilon<rt>::value();
+
+ static_assert(
+ ( std::is_same< typename Dune::FieldTraits<rt>::real_type, rt>::value ),
+ "DotProductTest requires real data type as template parameter!"
+ );
+
+ const ct I(0.,1.); // imaginary unit
+ const FieldVector<rt,d> one(1.); // vector filled with 1
+ const FieldVector<ct,d> iVec(ct(0.,1.)); // vector filled with I
+
+ std::cout << __func__ << "\t \t ( " << Dune::className(one) << " and " << Dune::className(iVec) << ")" << std::endl;
+
+ const bool isRealOne = std::is_same<typename Dune::FieldTraits<rt>::field_type,typename Dune::FieldTraits<rt>::real_type>::value;
+ const bool isRealIVec = std::is_same<typename Dune::FieldTraits<ct>::field_type,typename Dune::FieldTraits<ct>::real_type> ::value;
+ static_assert(isRealOne,"1-vector expected to be real");
+ static_assert(!isRealIVec,"i-vector expected to be complex");
+
+ ct result = ct();
+ ct length = ct(d);
+
+
+ // one^H*one should equal d
+ result = dot(one,one);
+ FVECTORTEST_ASSERT(std::abs(result-length)<= myEps);
+ result = one.dot(one);
+ FVECTORTEST_ASSERT(std::abs(result-length)<= myEps);
+
+
+ // iVec^H*iVec should equal d
+ result = dot(iVec,iVec);
+ FVECTORTEST_ASSERT(std::abs(result-length)<= myEps);
+ result = iVec.dot(iVec);
+ FVECTORTEST_ASSERT(std::abs(result-length)<= myEps);
+
+
+ // test that we do conjugate first argument
+ result = dot(one,iVec);
+ FVECTORTEST_ASSERT(std::abs(result-length*I)<= myEps);
+ result = dot(one,iVec);
+ FVECTORTEST_ASSERT(std::abs(result-length*I)<= myEps);
+
+
+ // test that we do not conjugate second argument
+ result = dot(iVec,one);
+ FVECTORTEST_ASSERT(std::abs(result+length*I)<= myEps);
+ result = iVec.dot(one);
+ FVECTORTEST_ASSERT(std::abs(result+length*I)<= myEps);
+
+
+ // test that dotT does not conjugate at all
+ result = dotT(one,one) + one*one;
+ FVECTORTEST_ASSERT(std::abs(result-ct(2)*length)<= myEps);
+ result = dotT(iVec,iVec) + iVec*iVec;
+ FVECTORTEST_ASSERT(std::abs(result+ct(2)*length)<= myEps);
+ result = dotT(one,iVec) + one*iVec;
+ FVECTORTEST_ASSERT(std::abs(result-ct(2)*length*I)<= myEps);
+ result = dotT(iVec,one) + iVec*one;
+ FVECTORTEST_ASSERT(std::abs(result-ct(2)*length*I)<= myEps);
+
+ }
+};
+
+// specialisation for non-floating-point field types: complex<rt> is not constructed,
+// so only the real-valued dot products are tested
+template <class rt, int d>
+struct DotProductTest<rt, d, false>
+{
+ DotProductTest() {
+ [[maybe_unused]] const rt myEps = Epsilon<rt>::value();
+
+ static_assert(
+ ( std::is_same< typename Dune::FieldTraits<rt>::real_type, rt>::value ),
+ "DotProductTest requires real data type as template parameter!"
+ );
+
+ const FieldVector<rt,d> one(1.); // vector filled with 1
+
+ std::cout << __func__ << "\t \t ( " << Dune::className(one) << " only)" << std::endl;
+
+ const bool isRealOne = std::is_same<typename Dune::FieldTraits<rt>::field_type,typename Dune::FieldTraits<rt>::real_type>::value;
+ static_assert(isRealOne,"1-vector expected to be real");
+
+ rt result = rt();
+ rt length = rt(d);
+
+ // one^H*one should equal d
+ result = dot(one,one);
+ FVECTORTEST_ASSERT(abs(result-length)<= myEps);
+ result = one.dot(one);
+ FVECTORTEST_ASSERT(abs(result-length)<= myEps);
+
+ // test that dotT does not conjugate at all
+ result = dotT(one,one) + one*one;
+ FVECTORTEST_ASSERT(abs(result-rt(2)*length)<= myEps);
+ }
+};
+
+template<class ft, int d,
+ bool floating_point = std::is_floating_point<ft>::value>
+struct FieldVectorTest
+{
+ FieldVectorTest()
+ {
+ // --- test complex and real valued vectors
+ FieldVectorMainTest<ft,ft,d>();
+ FieldVectorMainTest<complex<ft>,ft,d>();
+ DotProductTest<ft,d>();
+ // --- test next lower dimension
+ FieldVectorTest<ft,d-1>();
+ }
+};
+
+// specialisation for non-floating-point vectors
+template<class ft, int d>
+struct FieldVectorTest<ft, d, false>
+{
+ FieldVectorTest()
+ {
+ // --- test real valued vectors
+ FieldVectorMainTest<ft,ft,d>();
+ DotProductTest<ft,d>();
+ // --- test next lower dimension
+ FieldVectorTest<ft,d-1>();
+ }
+};
+
+// specialization for 1d floating point vector
+template<class ft>
+class FieldVectorTest<ft,1,true>
+{
+public:
+ FieldVectorTest()
+ {
+ // --- real valued
+ FieldVectorMainTest<ft,ft,1>();
+ ScalarOperatorTest<ft>();
+ ScalarOrderingTest<ft>();
+ DotProductTest<ft,1>();
+ // --- complex valued
+ FieldVectorMainTest<complex<ft>,ft,1>();
+ ScalarOperatorTest< complex<ft> >();
+ // ordering doesn't work for complex numbers
+
+ // --- test with an integer
+ ScalarOperatorTest< ft, int >();
+ // --- test next lower dimension
+ FieldVectorMainTest<ft,ft,0>();
+ }
+};
+
+// specialization for other 1d vectors
+template<class ft>
+class FieldVectorTest<ft,1,false>
+{
+public:
+ FieldVectorTest()
+ {
+ // --- real valued
+ FieldVectorMainTest<ft,ft,1>();
+ ScalarOperatorTest<ft>();
+ ScalarOrderingTest<ft>();
+ DotProductTest<ft,1>();
+
+ // --- test with an integer
+ ScalarOperatorTest< ft, int >();
+ // --- test next lower dimension
+ FieldVectorMainTest<ft,ft,0>();
+ }
+};
+
+template <class V>
+void checkNormNAN(V const &v, int line) {
+ if (!std::isnan(v.one_norm())) {
+ std::cerr << "error: norm not NaN: one_norm() on line "
+ << line << " (type: " << Dune::className(v[0]) << ")"
+ << std::endl;
+ std::exit(-1);
+ }
+ if (!std::isnan(v.two_norm())) {
+ std::cerr << "error: norm not NaN: two_norm() on line "
+ << line << " (type: " << Dune::className(v[0]) << ")"
+ << std::endl;
+ std::exit(-1);
+ }
+ if (!std::isnan(v.infinity_norm())) {
+ std::cerr << "error: norm not NaN: infinity_norm() on line "
+ << line << " (type: " << Dune::className(v[0]) << ")"
+ << std::endl;
+ std::exit(-1);
+ }
+}
+
+// Make sure that vectors with NaN entries have norm NaN.
+// See also bug flyspray/FS#1147
+template <typename T>
+void
+test_nan(T const &mynan)
+{
+ {
+ Dune::FieldVector<T, 2> v = { mynan, mynan };
+ checkNormNAN(v, __LINE__);
+ }
+ {
+ Dune::FieldVector<T, 2> v = { mynan, 0 };
+ checkNormNAN(v, __LINE__);
+ }
+ {
+ Dune::FieldVector<T, 2> v = { 0, mynan };
+ checkNormNAN(v, __LINE__);
+ }
+}
+
+void
+test_infinity_norms()
+{
+ std::complex<double> threefour(3.0, -4.0);
+ std::complex<double> eightsix(8.0, -6.0);
+
+ Dune::FieldVector<std::complex<double>, 2> v;
+ v[0] = threefour;
+ v[1] = eightsix;
+ FVECTORTEST_ASSERT(std::abs(v.infinity_norm() -10.0) < 1e-10); // max(5,10)
+ FVECTORTEST_ASSERT(std::abs(v.infinity_norm_real()-14.0) < 1e-10); // max(7,14)
+}
+
+void
+test_initialisation()
+{
+ [[maybe_unused]] Dune::FieldVector<int, 2> const b = { 1, 2 };
+
+ FVECTORTEST_ASSERT(b[0] == 1);
+ FVECTORTEST_ASSERT(b[1] == 2);
+}
+
+void fieldvectorMathclassifiersTest() {
+ double nan = std::nan("");
+ double inf = std::numeric_limits<double>::infinity();
+
+ FieldVector<double,3> fv_normal(1.);
+ FieldVector<double,3> fv_nan(1.);
+ FieldVector<double,3> fv_inf(1.);
+
+ fv_nan[2] = nan;
+ fv_inf[2] = inf;
+
+ //test vector containing only doubles
+ if(Dune::isNaN(fv_normal) == true) {
+ std::abort();
+ }
+ if(Dune::isInf(fv_normal) == true) {
+ std::abort();
+ }
+ if(Dune::isFinite(fv_normal) == false) {
+ std::abort();
+ }
+
+ //test vector containing a NaN-entry
+ if(Dune::isNaN(fv_nan) == false) {
+ std::abort();
+ }
+ if(Dune::isInf(fv_nan) == true) {
+ std::abort();
+ }
+ if(Dune::isFinite(fv_nan) == true) {
+ std::abort();
+ }
+
+ //test vector containing an infinity-entry
+ if(Dune::isNaN(fv_inf) == true) {
+ std::abort();
+ }
+ if(Dune::isInf(fv_inf) == false) {
+ std::abort();
+ }
+ if(Dune::isFinite(fv_inf) == true) {
+ std::abort();
+ }
+}
+
+
+int main()
+{
+ {
+ FieldVectorTest<int, 1>();
+ FieldVectorTest<float, 1>();
+ FieldVectorTest<double, 1>();
+ FieldVectorTest<long double, 1>();
+ FieldVectorTest<int, 3>();
+ FieldVectorTest<float, 3>();
+ FieldVectorTest<double, 3>();
+ FieldVectorTest<long double, 3>();
+#if HAVE_GMP
+ {
+ // we skip the complex test and the int test, as these will be very hard to implement with GMPField
+ typedef Dune::GMPField<128u> ft;
+ FieldVectorMainTest<ft,ft,3>();
+ FieldVectorMainTest<ft,ft,2>();
+ FieldVectorMainTest<ft,ft,1>();
+ FieldVectorMainTest<ft,ft,0>();
+ ScalarOperatorTest<ft>();
+ ScalarOrderingTest<ft>();
+ DotProductTest<ft,3>();
+ }
+#endif // HAVE_GMP
+
+#if HAVE_QUADMATH
+ {
+ // we skip the int test, as these will be very hard to implement with Float128
+ typedef Dune::Float128 ft;
+ FieldVectorMainTest<ft,ft,3>();
+ FieldVectorMainTest<ft,ft,2>();
+ FieldVectorMainTest<ft,ft,1>();
+ FieldVectorMainTest<ft,ft,0>();
+ ScalarOperatorTest<ft>();
+ ScalarOrderingTest<ft>();
+ DotProductTest<ft,3>();
+ }
+#endif
+
+ //test the mathclassifiers Dune::isNaN, Dune::isInf, Dune::isFinite
+ fieldvectorMathclassifiersTest();
+
+ {
+ double nan = std::nan("");
+ test_nan(nan);
+ }
+ {
+ std::complex<double> nan( std::nan(""), 17 );
+ test_nan(nan);
+ }
+ test_infinity_norms();
+ test_initialisation();
+ }
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <dune/common/test/iteratorfacadetest.hh>
+#include <dune/common/test/iteratortest.hh>
+
+int main(){
+ // Test the TestIterator;
+ typedef TestContainer<double, Dune::BidirectionalIteratorFacade> Container;
+ Container bidicontainer;
+
+ Container::const_iterator cit = bidicontainer.begin();
+ // This should fail, since making a mutable iterator from a const iterator
+ // discards qualifiers
+ [[maybe_unused]] Container::iterator it;
+ it = cit;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <tuple>
+#include <vector>
+
+#include <dune/common/hybridutilities.hh>
+#include <dune/common/tuplevector.hh>
+#include <dune/common/test/testsuite.hh>
+
+
+
+template<class C>
+auto incrementAll(C&& c)
+{
+ using namespace Dune::Hybrid;
+ forEach(c, [](auto&& ci) {
+ ++ci;
+ });
+}
+
+template<class C>
+auto addIndex(C&& c)
+{
+ using namespace Dune::Hybrid;
+ forEach(integralRange(Dune::Hybrid::size(c)), [&](auto&& i) {
+ c[i] += i;
+ });
+}
+
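+// Note on the ifElse/id pattern used in incAndAppendToFirst below: both
+// branches receive an identity functor `id`; wrapping the operand as id(c[i])
+// makes the expression dependent, so only the branch that is actually taken
+// needs to compile for the given element type. This is what allows appending
+// to the std::string entry while incrementing the arithmetic entries of the
+// same heterogeneous container.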
+template<class C>
+auto incAndAppendToFirst(C&& c)
+{
+ using namespace Dune::Hybrid;
+ forEach(integralRange(Dune::Hybrid::size(c)), [&](auto&& i) {
+ using namespace Dune::Hybrid;
+ using namespace Dune::Indices;
+ ifElse(equals(i, _0), [&](auto id) {
+ id(c[i]).append("+1");
+ }, [&](auto id) {
+ ++id(c[i]);
+ });
+ });
+}
+
+template<class C>
+constexpr auto sum(C&& c)
+{
+ using namespace Dune::Hybrid;
+ using namespace Dune::Indices;
+ return accumulate(c, 0.0, [](auto&& a, auto&& b) {
+ return a+b;
+ });
+}
+
+template<class C, class I>
+auto sumSubsequence(C&& c, I&& indices)
+{
+ using namespace Dune::Hybrid;
+ double result = 0;
+ forEach(indices, [&](auto i) {
+ result += Dune::Hybrid::elementAt(c, i);
+ });
+ return result;
+}
+
+
+
+int main()
+{
+ auto vector = std::vector<int>{1, 2, 3};
+ auto numberTuple = Dune::makeTupleVector(0.1, 2, 3);
+
+ Dune::TestSuite test;
+
+ incrementAll(vector);
+ test.check(vector == std::vector<int>{2, 3, 4})
+ << "Incrementing vector entries with Hybrid::forEach failed.";
+
+ incrementAll(numberTuple);
+ test.check(numberTuple == Dune::makeTupleVector(1.1, 3, 4))
+ << "Incrementing tuple entries with Hybrid::forEach failed.";
+
+ addIndex(vector);
+ test.check(vector == std::vector<int>{2, 4, 6})
+ << "Adding indices to vector entries with Hybrid::forEach failed.";
+
+ addIndex(numberTuple);
+ test.check(numberTuple == Dune::makeTupleVector(1.1, 4, 6))
+ << "Adding indices to vector entries with Hybrid::forEach failed.";
+
+
+ auto mixedTuple = Dune::makeTupleVector(std::string("1"), 2, 3);
+ incAndAppendToFirst(mixedTuple);
+ test.check(mixedTuple == Dune::makeTupleVector(std::string("1+1"), 3, 4))
+ << "Adding indices to vector entries with Hybrid::forEach failed.";
+
+ constexpr auto values = std::make_integer_sequence<std::size_t, 30>();
+ test.check((30*29)/2 == sum(values))
+ << "accumulate() yields incorrect result.";
+
+ test.check((29*28)/2 == sumSubsequence(values, std::make_integer_sequence<std::size_t, 29>()))
+ << "Summing up subsequence failed.";
+
+ // Compile time checks
+ static_assert(sum(values) == (30*29)/2, "Wrong compile time sum!");
+ constexpr auto numberTupleConstexpr = Dune::makeTupleVector(0.1, 2, 3);
+ static_assert(sum(numberTupleConstexpr) == 5.1, "Wrong compile time sum!");
+
+ return test.exit();
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <tuple>
+
+#include <dune/common/indices.hh>
+#include <dune/common/tuplevector.hh>
+
+using namespace Dune;
+
+
+
+int main()
+{
+ using namespace Dune::Indices;
+
+ // Test whether indices can be used to index a data structure
+ Dune::TupleVector<int,double,float> v;
+ v[_0] = 42;
+ v[_1] = 3.14;
+ v[_2] = 2.7;
+
+ // Test whether the indices can be used as numbers
+ std::get<_0>(v) = 43;
+ std::get<_1>(v) = 4.14;
+ std::get<_2>(v) = 3.7;
+
+ return 0;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <dune/common/typetraits.hh>
+#include <dune/common/test/testsuite.hh>
+
+int main()
+{
+ Dune::TestSuite test;
+
+ {
+ auto f = [](int /*i*/) { return 0; };
+ using F = decltype(f);
+
+ test.check(Dune::IsCallable<F(int)>() == true)
+ << "Dune::IsCallable does not accept copy from r-value";
+ test.check(Dune::IsCallable<F(int&)>() == true)
+ << "Dune::IsCallable does not accept copy from l-value reference";
+ test.check(Dune::IsCallable<F(int&&)>() == true)
+ << "Dune::IsCallable does not accept copy from r-value reference";
+
+ test.check(Dune::IsCallable<F(std::string)>() == false)
+ << "Dune::IsCallable accepts invalid argument type";
+ test.check(Dune::IsCallable<F(int, int)>() == false)
+ << "Dune::IsCallable accepts invalid argument count";
+ test.check(Dune::IsCallable<F(int), int>() == true)
+ << "Dune::IsCallable does not accept valid return type";
+ test.check(Dune::IsCallable<F(int), std::string>() == false)
+ << "Dune::IsCallable accepts invalid return type";
+ }
+
+ {
+ auto f = [](const int& /*i*/) {};
+ using F = decltype(f);
+
+ test.check(Dune::IsCallable<F(int)>() == true)
+ << "Dune::IsCallable does not accept const& temporary from r-value";
+ test.check(Dune::IsCallable<F(int&)>() == true)
+ << "Dune::IsCallable does not accept const& temporary from l-value reference";
+ test.check(Dune::IsCallable<F(int&&)>() == true)
+ << "Dune::IsCallable does not accept const& temporary from r-value reference";
+ }
+
+ {
+ auto f = [](int& /*i*/) {};
+ using F = decltype(f);
+
+ test.check(Dune::IsCallable<F(int)>() == false)
+ << "Dune::IsCallable accepts l-value reference from r-value";
+ test.check(Dune::IsCallable<F(int&)>() == true)
+ << "Dune::IsCallable does not accept l-value reference from l-value reference";
+ test.check(Dune::IsCallable<F(int&&)>() == false)
+ << "Dune::IsCallable accepts l-value reference from r-value reference";
+ }
+
+ {
+ auto f = [](int&& /*i*/) {};
+ using F = decltype(f);
+
+ test.check(Dune::IsCallable<F(int)>() == true)
+ << "Dune::IsCallable does not accept r-value reference from r-value";
+ test.check(Dune::IsCallable<F(int&)>() == false)
+ << "Dune::IsCallable accepts r-value reference from l-value reference";
+ test.check(Dune::IsCallable<F(int&&)>() == true)
+ << "Dune::IsCallable does not accept r-value reference from r-value reference";
+ }
+
+ return test.exit();
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <dune/common/test/iteratorfacadetest.hh>
+#include <dune/common/test/iteratortest.hh>
+#include <iostream>
+#include <algorithm>
+
+template<class Container>
+void randomize(Container& cont){
+ srand(300);
+ double size=1000;
+
+ for(int i=0; i < 100; i++) {
+ cont[i] = (size*(rand()/(RAND_MAX+1.0)));
+
+ }
+}
+
+template<class Container>
+void print(Container& cont){
+ for(int i=0; i < 100; i++)
+ std::cout<<cont[i]<<" ";
+ std::cout<<std::endl;
+}
+
+template<class Container>
+int containerTest(Container & container)
+{
+ randomize(container);
+ // print(container);
+ //std::sort(container.begin(), container.end());
+ //print(container);
+
+ const Container ccontainer(container);
+ int ret=0;
+ Printer<const double> print;
+ ret += testIterator(container, print);
+ ret += testIterator(ccontainer, print);
+
+ return ret;
+}
+
+int main(){
+ // Test the TestIterator;
+ TestContainer<double, Dune::ForwardIteratorFacade> forwardcontainer;
+ TestContainer<double, Dune::BidirectionalIteratorFacade> bidicontainer;
+ TestContainer<double, Dune::RandomAccessIteratorFacade> randomcontainer;
+
+ int ret=0;
+
+ ret += containerTest(forwardcontainer);
+ ret += containerTest(bidicontainer);
+ ret += containerTest(randomcontainer);
+
+ return (ret);
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_ITERATORFACADETEST_HH
+#define DUNE_ITERATORFACADETEST_HH
+#include <dune/common/iteratorfacades.hh>
+#include <dune/common/genericiterator.hh>
+#include <dune/common/typetraits.hh>
+
+template<class T,
+ template<class,class,class,class> class IteratorFacade=Dune::RandomAccessIteratorFacade>
+class TestContainer {
+public:
+ typedef Dune::GenericIterator<TestContainer<T,IteratorFacade>,T,T&,std::ptrdiff_t,IteratorFacade> iterator;
+
+ typedef Dune::GenericIterator<const TestContainer<T,IteratorFacade>,const T,const T&,std::ptrdiff_t,IteratorFacade> const_iterator;
+
+ TestContainer(){
+ for(int i=0; i < 100; i++)
+ values_[i]=i;
+ }
+
+ iterator begin(){
+ return iterator(*this, 0);
+ }
+
+ const_iterator begin() const {
+ return const_iterator(*this, 0);
+ }
+
+ iterator end(){
+ return iterator(*this, 100);
+ }
+
+ const_iterator end() const {
+ return const_iterator(*this, 100);
+ }
+
+ T& operator[](int i){
+ return values_[i];
+ }
+
+
+ const T& operator[](int i) const {
+ return values_[i];
+ }
+private:
+ T values_[100];
+};
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "dummyiterator.hh"
+
+int main(){
+ // Check that iterator<T> can be compared with iterator<const T> as soon as
+ // a conversion from iterator<T> to iterator<const T> exists
+
+ int value = 0;
+ dummyiterator<int> mit(value);
+ dummyiterator<const int> cit(value);
+
+ bool result = mit == cit;
+
+ if(result) return 0;
+ else return 1;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_COMMON_TEST_ITERATORTEST_HH
+#define DUNE_COMMON_TEST_ITERATORTEST_HH
+#include <iostream>
+#include <algorithm>
+#include <dune/common/classname.hh>
+#include <dune/common/typetraits.hh>
+
+/**
+ * @brief Test whether the class Iter implements the interface of an STL output iterator
+ *
+ * @param iterator Iterator to test
+ * @param iterations Number of times that 'iterator' can be safely incremented
+ * @param value A value that is sent to the output iterator
+ */
+template<class Iter, class Value>
+void testOutputIterator(Iter iterator, std::size_t iterations, Value value)
+{
+ // Test whether iterator is copy-constructible
+ // The new iterator object will go out of scope at the end of this method, and hence
+ // destructibility will also be tested.
+ Iter tmp1(iterator);
+
+ // Test whether iterator is copy-assignable
+ Iter tmp2 = iterator;
+
+ // Test whether pre-increment and assignment works
+ for (size_t i=0; i<iterations; ++i, ++tmp1)
+ // An output iterator can only be dereferenced as an lvalue (if in a dereferenceable state).
+ // It shall only be dereferenced as the left-side of an assignment statement.
+ *tmp1 = value;
+
+ // Test whether post-increment and assignment works
+ for (size_t i=0; i<iterations; ++i, tmp2++)
+ *tmp2 = value;
+
+ // Test whether std::iterator_traits is properly specialized
+ // The AlwaysTrue<A> construction allows one to test whether the type A exists at all,
+ // without assuming anything further about A.
+ static_assert(Dune::AlwaysTrue<typename std::iterator_traits<Iter>::difference_type>::value,
+ "std::iterator_traits::difference_type is not defined!");
+ static_assert(Dune::AlwaysTrue<typename std::iterator_traits<Iter>::value_type>::value,
+ "std::iterator_traits::value_type is not defined!");
+ static_assert(Dune::AlwaysTrue<typename std::iterator_traits<Iter>::pointer>::value,
+ "std::iterator_traits::pointer is not defined!");
+ static_assert(Dune::AlwaysTrue<typename std::iterator_traits<Iter>::reference>::value,
+ "std::iterator_traits::reference is not defined!");
+
+ // Make sure the iterator_category is properly set
+ static_assert(std::is_same<typename std::iterator_traits<Iter>::iterator_category, std::output_iterator_tag>::value,
+ "std::iterator_traits::iterator_category is not properly defined!");
+}
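+
+// A minimal usage sketch (illustrative, not called by the tests in this
+// module): an STL output iterator such as std::back_insert_iterator could be
+// checked with
+//
+//   std::vector<int> buffer;
+//   testOutputIterator(std::back_inserter(buffer), 10, 42);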
+
+/**
+ * @brief Test whether the class Iter implements the interface of an STL forward iterator
+ *
+ * @param begin Iterator positioned at the start
+ * @param end Iterator positioned at the end
+ * @param opt Functor for doing whatever one wants
+ */
+template<class Iter, class Opt>
+int testForwardIterator(Iter begin, Iter end, Opt& opt)
+{
+ // Return status
+ int ret=0;
+
+ // Test whether the iterator can be value-initialized.
+ // These objects will go out of scope at the end of this method, and hence
+ // this also tests whether they are destructible.
+ Iter defaultConstructedIterator1{}, defaultConstructedIterator2{};
+
+ // Since C++14, value-initialized forward iterators are specified as the
+ // end iterator of the same, empty sequence. Hence, they should compare equal.
+ // Notice that value-initialization and default-initialization are not the
+ // same for raw pointers. Since these are POD, value-initialization leads
+ // to zero-initialization while default-initialization would leave them
+ // uninitialized such that the comparison is undefined behaviour.
+ if (defaultConstructedIterator1 != defaultConstructedIterator2) {
+ std::cerr<<"Default constructed iterators do not compare equal for "+Dune::className<Iter>()+"."<<std::endl;
+ ret=1;
+ }
+
+ // Test whether iterator is copy-constructible
+ Iter tmp1(begin);
+
+ // Test whether iterator is copy-assignable
+ Iter tmp=begin;
+
+ // Test for inequality
+ if (tmp!=begin || tmp1!=begin || tmp!=tmp1) {
+ std::cerr<<" Copying iterator failed "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ret=1;
+ }
+
+ // Test for equality
+ if (not (tmp==begin && tmp1==begin && tmp==tmp1)) {
+ std::cerr<<" Copying iterator failed "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ret=1;
+ }
+
+ // Test whether pre-increment works
+ for(; begin!=end; ++begin)
+ // Test rvalue dereferencing
+ opt(*begin);
+
+ // Test whether post-increment works
+ for(; begin!=end; begin++)
+ opt(*begin);
+
+ // Test whether std::iterator_traits is properly specialized
+ // The is_same<A,A> construction allows one to test whether the type A exists at all,
+ // without assuming anything further about A.
+ static_assert(std::is_same<typename std::iterator_traits<Iter>::difference_type, typename std::iterator_traits<Iter>::difference_type>::value,
+ "std::iterator_traits::difference_type is not defined!");
+ static_assert(std::is_same<typename std::iterator_traits<Iter>::value_type, typename std::iterator_traits<Iter>::value_type>::value,
+ "std::iterator_traits::value_type is not defined!");
+ static_assert(std::is_same<typename std::iterator_traits<Iter>::pointer, typename std::iterator_traits<Iter>::pointer>::value,
+ "std::iterator_traits::pointer is not defined!");
+ static_assert(std::is_same<typename std::iterator_traits<Iter>::reference, typename std::iterator_traits<Iter>::reference>::value,
+ "std::iterator_traits::reference is not defined!");
+
+ // Make sure the iterator_category is properly set
+ static_assert(std::is_same<typename std::iterator_traits<Iter>::iterator_category, std::forward_iterator_tag>::value
+ or std::is_same<typename std::iterator_traits<Iter>::iterator_category, std::bidirectional_iterator_tag>::value
+ or std::is_same<typename std::iterator_traits<Iter>::iterator_category, std::random_access_iterator_tag>::value,
+ "std::iterator_traits::iterator_category is not properly defined!");
+
+ return ret;
+}
+
+/**
+ * @brief Tests the capabilities of a bidirectional iterator.
+ *
+ * Namely it tests whether random positions can be reached from
+ * both directions.
+ *
+ * @param begin Iterator positioned at the start.
+ * @param end Iterator positioned at the end.
+ * @param opt Functor for doing whatever one wants.
+ */
+template<class Iter, class Opt>
+int testBidirectionalIterator(Iter begin, Iter end, Opt opt)
+{
+ int ret=testForwardIterator(begin, end, opt);
+ for(Iter pre = end, post = end; pre != begin; )
+ {
+ if(pre != post--)
+ {
+ std::cerr << "Postdecrement did not return the old iterator"
+ << std::endl;
+ ++ret;
+ }
+ if(--pre != post)
+ {
+ std::cerr << "Predecrement did not return the new iterator"
+ << std::endl;
+ ++ret;
+ }
+ opt(*pre);
+ }
+
+ typename Iter::difference_type size = std::distance(begin, end);
+ srand(300);
+
+ int no= (size>10) ? 10 : size;
+
+ for(int i=0; i < no; i++)
+ {
+ int index = static_cast<int>(size*(rand()/(RAND_MAX+1.0)));
+ int backwards=size-index;
+ Iter tbegin = begin;
+ Iter tend = end;
+ for(int j=0; j < index; j++) ++tbegin;
+ for(int j=0; j < backwards; j++) --tend;
+
+ if(tbegin != tend)
+ {
+ std::cerr<<"Did not reach same index by starting forward from "
+ <<"begin and backwards from end."<<std::endl;
+ ++ret;
+ }
+ }
+ return ret;
+}
+
+template<class Iter, class Opt>
+int testRandomAccessIterator(Iter begin, Iter end, Opt opt){
+ int ret=testBidirectionalIterator(begin, end, opt);
+
+ typename Iter::difference_type size = end-begin;
+
+ srand(300);
+
+ int no= (size>10) ? 10 : size;
+
+ for(int i=0; i < no; i++)
+ {
+ int index = static_cast<int>(size*(rand()/(RAND_MAX+1.0)));
+ opt(begin[index]);
+ }
+
+ // Test the less than operator
+ if(begin != end &&!( begin<end))
+ {
+ std::cerr<<"! (begin()<end())"<<std::endl;
+ ret++;
+ }
+
+ if(begin != end) {
+ if(begin-end >= 0) {
+ std::cerr<<"begin!=end, but begin-end >= 0!"<<std::endl;
+ ret++;
+ }
+ if(end-begin <= 0) {
+ std::cerr<<"begin!=end, but end-begin <= 0!"<<std::endl;
+ ret++;
+ }
+ }
+
+ for(int i=0; i < no; i++)
+ {
+ int index = static_cast<int>(size*(rand()/(RAND_MAX+1.0)));
+ Iter rand(begin), test(begin), res{};
+ rand+=index;
+
+ if((res=begin+index) != rand)
+ {
+ std::cerr << " i+n should have the result i+=n, where i is the "
+ <<"iterator and n is the difference type!" <<std::endl;
+ ret++;
+ }
+ for(int j = 0; j < index; j++)
+ ++test;
+
+ if(test != rand)
+ {
+ std::cerr << "i+=n should have the same result as applying the"
+ << "increment ooperator n times!"<< std::endl;
+ ret++;
+ }
+
+ rand=end, test=end;
+ rand-=index;
+
+
+ if((end-index) != rand)
+ {
+ std::cerr << " i-n should have the result i-=n, where i is the "
+ <<"iterator and n is the difference type!" <<std::endl;
+ ret++;
+ }
+ for(int j = 0; j < index; j++)
+ --test;
+
+ if(test != rand)
+ {
+ std::cerr << "i+=n should have the same result as applying the"
+ << "increment ooperator n times!"<< std::endl;
+ ret++;
+ }
+ }
+
+ for(int i=0; i < no; i++)
+ {
+ Iter iter1 = begin+static_cast<int>(size*(rand()/(RAND_MAX+1.0)));
+ Iter iter2 = begin+static_cast<int>(size*(rand()/(RAND_MAX+1.0)));
+ typename Iter::difference_type diff = iter2 -iter1;
+ if((iter1+diff)!=iter2) {
+ std::cerr<< "i+(j-i) = j should hold, where i,j are iterators!"<<std::endl;
+ ret++;
+ }
+ }
+
+ return ret;
+}
+
+template<class Iter, class Opt, typename iterator_category>
+int testIterator(Iter& begin, Iter& end, Opt& opt, iterator_category cat);
+
+template<class Iter, class Opt>
+int testIterator(Iter& begin, Iter& end, Opt& opt, std::forward_iterator_tag)
+{
+ return testForwardIterator(begin, end, opt);
+}
+
+template<class Iter, class Opt>
+int testIterator(Iter& begin, Iter& end, Opt& opt, std::bidirectional_iterator_tag)
+{
+ return testBidirectionalIterator(begin, end, opt);
+}
+
+template<class Iter, class Opt>
+int testIterator(Iter& begin, Iter& end, Opt& opt, std::random_access_iterator_tag)
+{
+ // std::cout << "Testing iterator ";
+ int ret = testRandomAccessIterator(begin, end, opt);
+ //std::cout<<std::endl;
+ return ret;
+}
+
+template<class Iter, class Opt>
+int testConstIterator(Iter& begin, Iter& end, Opt& opt)
+{
+ //std::cout << "Testing constant iterator: ";
+ int ret=testIterator(begin, end, opt, typename std::iterator_traits<Iter>::iterator_category());
+ //std::cout<<std::endl;
+ return ret;
+}
+
+template<bool>
+struct TestSorting
+{
+ template<class Container, typename IteratorTag>
+ static void testSorting(Container&, IteratorTag)
+ {}
+ template<class Container>
+ static void testSorting(Container& c, std::random_access_iterator_tag)
+ {
+ std::sort(c.begin(), c.end());
+ }
+};
+
+template<>
+struct TestSorting<false>
+{
+ template<class Container>
+ static void testSorting(Container&, std::random_access_iterator_tag)
+ {}
+ template<class Container, typename IteratorTag>
+ static void testSorting(Container&, IteratorTag)
+ {}
+};
+
+
+template<class Container, class Opt, bool testSort>
+int testIterator(Container& c, Opt& opt)
+{
+ typename Container::iterator begin=c.begin(), end=c.end();
+ typename Container::const_iterator cbegin(begin);
+ [[maybe_unused]] typename Container::const_iterator cbegin1 = begin;
+ typename Container::const_iterator cend=c.end();
+ int ret = 0;
+
+ TestSorting<testSort>::testSorting(c, typename std::iterator_traits<typename Container::iterator>::iterator_category());
+
+ if(end!=cend || cend!=end)
+ {
+ std::cerr<<"constant and mutable iterators should be equal!"<<std::endl;
+ ret=1;
+ }
+ ret += testConstIterator(cbegin, cend, opt);
+ if(testSort)
+ ret += testIterator(begin,end,opt);
+
+ return ret;
+}
+
+template<class Container, class Opt>
+int testIterator(Container& c, Opt& opt)
+{
+ return testIterator<Container,Opt,true>(c,opt);
+}
+
+template<class Iter, class Opt>
+void testAssignment(Iter begin, Iter end, Opt&)
+{
+ //std::cout << "Assignment: ";
+ for(; begin!=end; begin++)
+ *begin=typename std::iterator_traits<Iter>::value_type();
+ //std::cout<<" Done."<< std::endl;
+}
+
+template<class Iter, class Opt>
+int testIterator(Iter& begin, Iter& end, Opt& opt)
+{
+ testAssignment(begin, end, opt);
+ return testConstIterator(begin, end, opt);
+}
+
+
+template<class T>
+class Printer {
+ typename std::remove_const<T>::type res;
+public:
+ Printer() : res(0){}
+ void operator()(const T& t){
+ res+=t;
+ // std::cout << t <<" ";
+ }
+};
+
+template<class Container, class Opt>
+int testIterator(const Container& c, Opt& opt)
+{
+ typename Container::const_iterator begin=c.begin(), end=c.end();
+ return testConstIterator(begin,end, opt);
+}
+
+
+template<class Container>
+int testIterator(Container& c)
+{
+ Printer<typename std::iterator_traits<typename Container::iterator>::value_type> print;
+ return testIterator(c,print);
+}
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include <assert.h>
+#include <iostream>
+#include <dune/common/lru.hh>
+#include <dune/common/parallel/mpihelper.hh>
+
+void lru_test()
+{
+ std::cout << "testing Dune::lru<int,double>\n";
+
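+ // The checks below exercise the LRU ordering: insert() and touch() move an
+ // entry to the front, so front() is the most recently used value and back()
+ // the least recently used one; pop_front() and pop_back() drop entries from
+ // the respective end.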
+ Dune::lru<int, double> lru;
+ lru.insert(10, 1.0);
+ assert(lru.front() == lru.back());
+ lru.insert(11, 2.0);
+ assert(lru.front() == 2.0 && lru.back() == 1.0);
+ lru.insert(12, 99);
+ lru.insert(13, 1.3);
+ lru.insert(14, 12345);
+ lru.insert(15, -17);
+ assert(lru.front() == -17 && lru.back() == 1.0);
+ // update
+ lru.insert(10);
+ assert(lru.front() == 1.0 && lru.back() == 2.0);
+ // update
+ lru.touch(13);
+ assert(lru.front() == 1.3 && lru.back() == 2.0);
+ // remove item
+ lru.pop_front();
+ assert(lru.front() == 1.0 && lru.back() == 2.0);
+ // remove item
+ lru.pop_back();
+ assert(lru.front() == 1.0 && lru.back() == 99);
+
+ std::cout << "... passed\n";
+}
+
+int main (int argc, char** argv)
+{
+ Dune::MPIHelper::instance(argc,argv);
+
+ lru_test();
+
+ return 0;
+}
--- /dev/null
+#include <complex>
+#include <iostream>
+#include <limits>
+
+#include <dune/common/math.hh>
+
+int main() {
+ //Initialize some variables
+ int a = 42;
+ const int b = 42;
+
+ double nan = std::nan("");
+ double inf = std::numeric_limits<double>::infinity();
+
+ std::complex<double> complex_nonan(42., 42.);
+ std::complex<double> complex_nan1(42.,nan);
+ std::complex<double> complex_nan2(nan, 42.);
+ std::complex<double> complex_nan3(nan, nan);
+
+ std::complex<double> complex_noinf(42., 42.);
+ std::complex<double> complex_inf1(42.,inf);
+ std::complex<double> complex_inf2(inf, 42.);
+ std::complex<double> complex_inf3(inf, inf);
+
+ std::cout << std::boolalpha
+ //check isNaN()
+ << "isNaN(int): " << Dune::isNaN(a) << "\n"
+ << "isNaN(const int): " << Dune::isNaN(b) << "\n"
+ << "isNaN(42): " << Dune::isNaN(42) << "\n"
+ << "isNaN(nan): " << Dune::isNaN(nan) << "\n"
+ << "isNaN(inf): " << Dune::isNaN(inf) << "\n"
+
+ << "isNaN(std::complex<double> without NaN): "
+ << Dune::isNaN(complex_nonan) << "\n"
+ << "isNaN(std::complex<double> with NaN): "
+ << Dune::isNaN(complex_nan1) << " "
+ << Dune::isNaN(complex_nan2) << " "
+ << Dune::isNaN(complex_nan3) << "\n"
+
+ //check isInf()
+ << "isInf(int): " << Dune::isInf(a) << "\n"
+ << "isInf(const int): " << Dune::isInf(b) << "\n"
+ << "isInf(42): " << Dune::isInf(42) << "\n"
+ << "isInf(inf): " << Dune::isInf(inf) << "\n"
+
+ << "isInf(std::complex<double> without inf): "
+ << Dune::isInf(complex_noinf) << "\n"
+ << "isInf(std::complex<double> with inf): "
+ << Dune::isInf(complex_inf1) << " "
+ << Dune::isInf(complex_inf2) << " "
+ << Dune::isInf(complex_inf3) << "\n"
+
+ //check isFinite()
+ << "isFinite(int): " << Dune::isFinite(a) << "\n"
+ << "isFinite(const int): " << Dune::isFinite(b) << "\n"
+ << "isFinite(42): " << Dune::isFinite(42) << "\n"
+ << "isFinite(inf): " << Dune::isFinite(inf) << "\n"
+
+ << "isFinite(std::complex<double> without inf): "
+ << Dune::isFinite(complex_noinf) << "\n"
+ << "isFinite(std::complex<double> with inf): "
+ << Dune::isFinite(complex_inf1) << " "
+ << Dune::isFinite(complex_inf2) << " "
+ << Dune::isFinite(complex_inf3) << "\n"
+
+ << std::endl;
+}
--- /dev/null
+#include "config.h"
+
+#include <iostream>
+
+#include <dune/common/hybridutilities.hh>
+#include <dune/common/indices.hh>
+#include <dune/common/math.hh>
+#include <dune/common/test/testsuite.hh>
+
+#include <dune/common/math.hh>
+
+
+using namespace Dune::Hybrid;
+using namespace Dune::Indices;
+using Dune::TestSuite;
+
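+// Helper: next() increments an integral_constant at compile time;
+// testStaticBinomial uses it so that its inner loop over _j runs up to and
+// including _i.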
+template<class T, T n>
+constexpr inline static auto next(std::integral_constant<T, n>)
+ -> std::integral_constant<T, n+1>
+{
+ return {};
+}
+
+template<class T, T k>
+auto testStaticFactorial (std::integral_constant<T, k> _k = {}) -> TestSuite
+{
+ TestSuite t;
+
+ std::cout << "test static factorial\n{";
+
+ forEach(integralRange(_k), [&](auto _i) {
+ auto value = Dune::factorial(_i);
+
+ t.check(decltype(value)::value == Dune::Factorial<decltype(_i)::value>::factorial);
+
+ std::cout<< ' ' << value() << ',';
+ });
+
+ std::cout << "};\n\n";
+
+ return t;
+}
+
+template<class T, T k>
+auto testStaticBinomial (std::integral_constant<T, k> _k = {}) -> TestSuite
+{
+ TestSuite t;
+
+ std::cout << "test static binomial\n";
+
+ forEach(integralRange(_k), [&](auto _i) {
+ std::cout << "{";
+ forEach(integralRange(next(_i)), [&](auto _j) {
+ const auto value = Dune::binomial(_i,_j);
+
+ auto control = Dune::Factorial<decltype(_i)::value>::factorial
+ / Dune::Factorial<decltype(_j)::value>::factorial
+ / Dune::Factorial<decltype(_i)::value - decltype(_j)::value>::factorial;
+ t.check(decltype(value)::value == control);
+
+ std::cout<< ' ' << value() << ',';
+ });
+
+ std::cout << "};\n";
+ });
+
+ std::cout << "\n";
+
+ return t;
+}
+
+int main(int argc, char** argv)
+{
+ TestSuite t;
+
+ t.subTest(testStaticFactorial(_5));
+ t.subTest(testStaticBinomial(_5));
+
+ return t.exit();
+}
--- /dev/null
+#include <config.h>
+#include <iostream>
+#include <vector>
+
+#if ! HAVE_METIS
+#error "METIS is required for this test"
+#endif
+
+#if HAVE_SCOTCH_METIS
+extern "C" {
+ #include <scotch.h>
+}
+#endif
+
+extern "C" {
+ #include <metis.h>
+}
+
+#if HAVE_SCOTCH_METIS && !defined(SCOTCH_METIS_RETURN)
+ // NOTE: scotchmetis does not define a return type for METIS functions
+ #define METIS_OK 1
+#endif
+
+int main()
+{
+#if defined(REALTYPEWIDTH) || defined(SCOTCH_METIS_DATATYPES)
+ using real_t = ::real_t;
+#else
+ using real_t = double;
+#endif
+
+#if defined(IDXTYPEWIDTH) || defined(SCOTCH_METIS_DATATYPES)
+ using idx_t = ::idx_t;
+#elif HAVE_SCOTCH_METIS
+ using idx_t = SCOTCH_Num;
+#else
+ using idx_t = int;
+#endif
+
+ idx_t nVertices = 6; // number of vertices
+ idx_t nCon = 1; // number of constraints
+ idx_t nParts = 2; // number of partitions
+
+ // Partition index for each vertex. Will be filled by METIS_PartGraphKway.
+ std::vector<idx_t> part(nVertices, 0);
+
+ // Indices of starting points in the adjacency array
+ std::vector<idx_t> xadj{0,2,5,7,9,12,14};
+
+ // Adjacent vertices in consecutive order
+ std::vector<idx_t> adjncy{1,3,0,4,2,1,5,0,4,3,1,5,4,2};
+
+ // Weights of vertices. If all weights are equal, they can be set to 1.
+ std::vector<idx_t> vwgt(nVertices * nCon, 1);
+
+ // Load-imbalance tolerance for each constraint.
+#if HAVE_SCOTCH_METIS
+ // NOTE: scotchmetis interprets this parameter differently
+ std::vector<real_t> ubvec(nCon, 0.01);
+#else
+ std::vector<real_t> ubvec(nCon, 1.001);
+#endif
+
+#if METIS_API_VERSION >= 5
+
+ std::cout << "using METIS API version 5\n";
+
+ idx_t objval;
+ int err = METIS_PartGraphKway(&nVertices, &nCon, xadj.data(), adjncy.data(),
+ vwgt.data(), nullptr, nullptr, &nParts, nullptr,
+ ubvec.data(), nullptr, &objval, part.data());
+
+#elif METIS_API_VERSION >= 3
+
+ std::cout << "using METIS API version 3\n";
+
+ int wgtflag = 2;
+ int numflag = 0;
+ int options = 0; // use default options
+
+ int edgecut;
+#if HAVE_SCOTCH_METIS && ! defined(SCOTCH_METIS_RETURN)
+ METIS_PartGraphKway(&nVertices, xadj.data(), adjncy.data(), vwgt.data(),
+ nullptr, &wgtflag, &numflag, &nParts, &options, &edgecut, part.data());
+ int err = METIS_OK;
+#else
+ int err = METIS_PartGraphKway(&nVertices, xadj.data(), adjncy.data(), vwgt.data(),
+ nullptr, &wgtflag, &numflag, &nParts, &options, &edgecut, part.data());
+#endif
+
+#endif // METIS_API_VERSION
+
+ if (err != METIS_OK)
+ return 1;
+
+ for (std::size_t part_i = 0; part_i < part.size(); ++part_i) {
+ // partition index must be in range [0,nParts)
+ if (part[part_i] >= nParts)
+ return 2;
+
+ std::cout << part_i << " " << part[part_i] << std::endl;
+ }
+
+ return 0;
+}
\ No newline at end of file
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <dune/common/parallel/mpihelper.hh>
+#include <dune/common/parallel/mpicommunication.hh>
+#include <dune/common/test/testsuite.hh>
+
+#include <iostream>
+int main(int argc, char** argv)
+{
+ Dune::TestSuite t;
+
+ typedef Dune::MPIHelper Helper;
+ Helper& mpi = Helper::instance(argc, argv);
+
+ {
+ typedef Helper::MPICommunicator MPIComm;
+ Dune::Communication<MPIComm> comm(mpi.getCommunicator());
+
+ enum { length = 5 };
+ double values[5];
+ for(int i=0; i<length; ++i) values[i] = 1.0;
+
+ double * commBuff = ((double *) &values[0]);
+ // calculate global sum
+ comm.sum( commBuff , length );
+
+ double val[length];
+ for(int i=0; i<length; ++i) val[i] = 1.0;
+ // calculate global sum by calling sum for each component
+ for(int i=0; i<length; ++i)
+ {
+ // this method works
+ val[i] = comm.sum( val[i] );
+ }
+
+ // result from above should be size of job
+ double size = mpi.size();
+ for(int i=0; i<length; ++i)
+ {
+ t.check( std::abs( values[i] - size ) < 1e-8 );
+ t.check( std::abs( val[i] - size ) < 1e-8 );
+ }
+
+ {
+ int i = 1;
+ const auto sum = comm.sum(i);
+ t.check(sum == comm.size())
+ << "sum of 1 must be equal to number of processes";
+ }
+ {
+ const int i = 1;
+ const auto sum = comm.sum(i);
+ t.check(sum == comm.size())
+ << "sum of 1 must be equal to number of processes";
+ }
+ }
+
+ std::cout << "We are at the end!"<<std::endl;
+
+ return t.exit();
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <dune/common/parallel/mpihelper.hh>
+#include <dune/common/parallel/mpicommunication.hh>
+#include <dune/common/test/testsuite.hh>
+
+#include <iostream>
+int main(int argc, char** argv)
+{
+ Dune::TestSuite t;
+
+ typedef Dune::MPIHelper Helper;
+ Helper& mpi = Helper::instance(argc, argv);
+
+ {
+ typedef Helper::MPICommunicator MPIComm;
+ Dune::Communication<MPIComm> comm(mpi.getCommunicator());
+
+ enum { length = 5 };
+ double values[5];
+ for(int i=0; i<length; ++i) values[i] = 1.0;
+
+ double * commBuff = ((double *) &values[0]);
+ // calculate global sum
+ comm.sum( commBuff , length );
+
+ double val[length];
+ for(int i=0; i<length; ++i) val[i] = 1.0;
+ // calculate global sum by calling sum for each component
+ for(int i=0; i<length; ++i)
+ {
+ // this method works
+ val[i] = comm.sum( val[i] );
+ }
+
+ // result from above should be size of job
+ double size = mpi.size();
+ for(int i=0; i<length; ++i)
+ {
+ t.check( std::abs( values[i] - size ) < 1e-8 );
+ t.check( std::abs( val[i] - size ) < 1e-8 );
+ }
+
+ {
+ int i = 1;
+ const auto sum = comm.sum(i);
+ t.check(sum == comm.size())
+ << "sum of 1 must be equal to number of processes";
+ }
+ {
+ const int i = 1;
+ const auto sum = comm.sum(i);
+ t.check(sum == comm.size())
+ << "sum of 1 must be equal to number of processes";
+ }
+ }
+
+ std::cout << "We are at the end!"<<std::endl;
+
+ return t.exit();
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include <config.h>
+
+#include <iostream>
+
+#include <dune/common/parallel/mpihelper.hh>
+#include <dune/common/parallel/mpiguard.hh>
+
+int main(int argc, char** argv)
+{
+ Dune::MPIHelper & mpihelper = Dune::MPIHelper::instance(argc, argv);
+
+ if (mpihelper.rank() == 0)
+ std::cout << "---- default constructor" << std::endl;
+ try
+ {
+ // at the end of this block the guard is destroyed and possible exceptions are communicated
+ {
+ Dune::MPIGuard guard;
+ if (mpihelper.rank() > 0)
+ DUNE_THROW(Dune::Exception, "Fake problem on process " << mpihelper.rank());
+ guard.finalize();
+ }
+ }
+ catch (Dune::Exception & e)
+ {
+ std::cout << "Error (rank " << mpihelper.rank() << "): "
+ << e.what() << std::endl;
+ }
+
+ mpihelper.getCommunication().barrier();
+ if (mpihelper.rank() == 0)
+ std::cout << "---- guard(MPI_COMM_WORLD)" << std::endl;
+ try
+ {
+#if HAVE_MPI
+ // at the end of this block the guard is destroyed and possible exceptions are communicated
+ {
+ Dune::MPIGuard guard(MPI_COMM_WORLD);
+ if (mpihelper.rank() > 0)
+ DUNE_THROW(Dune::Exception, "Fake problem on process " << mpihelper.rank());
+ guard.finalize();
+ }
+#else
+ std::cout << "Info: no mpi used\n";
+#endif
+ }
+ catch (Dune::Exception & e)
+ {
+ std::cout << "Error (rank " << mpihelper.rank() << "): "
+ << e.what() << std::endl;
+ }
+
+ mpihelper.getCommunication().barrier();
+ if (mpihelper.rank() == 0)
+ std::cout << "---- guard(MPIHelper)" << std::endl;
+ try
+ {
+ // at the end of this block the guard is destroyed and possible exceptions are communicated
+ {
+ Dune::MPIGuard guard(mpihelper);
+ if (mpihelper.rank() > 0)
+ DUNE_THROW(Dune::Exception, "Fake problem on process " << mpihelper.rank());
+ guard.finalize();
+ }
+ }
+ catch (Dune::Exception & e)
+ {
+ std::cout << "Error (rank " << mpihelper.rank() << "): "
+ << e.what() << std::endl;
+ }
+
+
+ mpihelper.getCommunication().barrier();
+ if (mpihelper.rank() == 0)
+ std::cout << "---- manual error" << std::endl;
+ try
+ {
+ // at the end of this block the guard is destroyed and possible exceptions are communicated
+ {
+ Dune::MPIGuard guard;
+ guard.finalize(mpihelper.rank() > 0);
+ }
+ }
+ catch (Dune::Exception & e)
+ {
+ std::cout << "Error (rank " << mpihelper.rank() << "): "
+ << e.what() << std::endl;
+ }
+
+ mpihelper.getCommunication().barrier();
+ if (mpihelper.rank() == 0)
+ std::cout << "---- done" << std::endl;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <iostream>
+
+#include <dune/common/parallel/mpihelper.hh>
+
+int main(int argc, char** argv)
+{
+
+#ifdef MPIHELPER_PREINITIALIZE
+#if HAVE_MPI
+ MPI_Init(&argc, &argv);
+#endif
+#endif
+
+ typedef Dune::MPIHelper Helper;
+
+ {
+ Helper& mpi = Helper::instance(argc, argv);
+
+ [[maybe_unused]] Helper::MPICommunicator comm = mpi.getCommunicator();
+ comm= mpi.getCommunicator();
+ }
+
+ {
+ Helper& mpi = Helper::instance(argc, argv);
+
+ [[maybe_unused]] Helper::MPICommunicator comm = mpi.getCommunicator();
+ comm= mpi.getCommunicator();
+
+#ifdef MPIHELPER_PREINITIALIZE
+#if HAVE_MPI
+ MPI_Finalize();
+#endif
+#endif
+ }
+ std::cout << "We are at the end!"<<std::endl;
+
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+
+#include <string>
+#include <tuple>
+
+#include <dune/common/overloadset.hh>
+#include <dune/common/hybridutilities.hh>
+
+#include <dune/common/test/testsuite.hh>
+
+struct Bar {
+ int bar() const { return 0; }
+};
+
+
+
+int main()
+{
+ Dune::TestSuite test;
+
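+ // Dune::overload performs normal overload resolution over all given lambdas,
+ // whereas Dune::orderedOverload (tested below) always selects the first
+ // lambda whose signature matches the arguments.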
+ {
+ auto foo = Dune::overload(
+ [](double /*i*/) { return 0; },
+ [](int /*i*/) { return 1; },
+ [](long /*i*/) { return 2; });
+
+ test.check(foo(3.14) == 0)
+ << "incorrect overload selected from OverloadSet";
+ test.check(foo(int(42)) == 1)
+ << "incorrect overload selected from OverloadSet";
+ test.check(foo(long(42)) == 2)
+ << "incorrect overload selected from OverloadSet";
+ }
+
+ {
+ auto foo = Dune::orderedOverload(
+ [](double /*i*/) { return 0; },
+ [](int /*i*/) { return 1; },
+ [](long /*i*/) { return 2; });
+
+ test.check(foo(3.14) == 0)
+ << "incorrect overload selected from OverloadSet";
+ test.check(foo(int(42)) == 0)
+ << "incorrect overload selected from OverloadSet";
+ test.check(foo(long(42)) == 0)
+ << "incorrect overload selected from OverloadSet";
+ }
+
+ {
+ auto foo = Dune::overload(
+ [](const int& /*i*/) { return 0; },
+ [](int&& /*i*/) { return 1; });
+
+ int i = 0;
+ test.check(foo(long(42)) == 1)
+ << "incorrect overload selected from OverloadSet";
+ test.check(foo(int(42)) == 1)
+ << "incorrect overload selected from OverloadSet";
+ test.check(foo(i) == 0)
+ << "incorrect overload selected from OverloadSet";
+ }
+
+ {
+ auto foo = Dune::orderedOverload(
+ [](const int& /*i*/) { return 0; },
+ [](int&& /*i*/) { return 1; });
+
+ int i = 0;
+ test.check(foo(long(42)) == 0)
+ << "incorrect overload selected from OverloadSet";
+ test.check(foo(int(42)) == 0)
+ << "incorrect overload selected from OverloadSet";
+ test.check(foo(i) == 0)
+ << "incorrect overload selected from OverloadSet";
+ }
+
+ {
+ auto t = std::make_tuple(42, "foo", 3.14);
+
+ auto typeToName = Dune::overload(
+ [](int) { return "int"; },
+ [](long) { return "long"; },
+ [](std::string) { return "string"; },
+ [](float) { return "float"; },
+ [](double) { return "double"; });
+
+ std::string tupleTypes;
+ Dune::Hybrid::forEach(t, [&](auto&& ti) {
+ tupleTypes += typeToName(ti);
+ });
+
+ test.check(tupleTypes == "intstringdouble")
+ << "traversal of tuple called incorrect overloads";
+ }
+
+ {
+ // Check if templated and non-templated overloads work
+ // nicely together.
+ auto f = Dune::overload(
+ [](const int& t) { (void) t;},
+ [](const auto& t) { t.bar();});
+ f(0);
+ }
+
+
+ return test.exit();
+}
--- /dev/null
+#include "config.h"
+#include <iostream>
+#include <cassert>
+#include <memory>
+#include <tuple>
+#include <dune/common/parameterizedobject.hh>
+#include <dune/common/parametertree.hh>
+#include <dune/common/shared_ptr.hh>
+
+#include "parameterizedobjectfactorysingleton.hh"
+
+DefineImplementation(InterfaceA, Aix, int);
+DefineImplementation(InterfaceA, Bix, int);
+
+int init_Factory()
+{
+ globalPtrFactory<InterfaceA>().define<Aix>("Aix");
+ globalPtrFactory<InterfaceA>().define("Bix", [](int i) { return std::make_unique<Bix>(i); });
+ return 0;
+}
+
+[[maybe_unused]] static const int init = init_Factory();
--- /dev/null
+#ifndef DUNE_COMMON_TEST_PARAMETERIZEDOBJECTFACTORYSINGLETON_HH
+#define DUNE_COMMON_TEST_PARAMETERIZEDOBJECTFACTORYSINGLETON_HH
+
+#include <dune/common/parameterizedobject.hh>
+#include <dune/common/singleton.hh>
+#include <string>
+
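+// Helper macros: DefineImplementation2(IF,T) declares a default-constructible
+// implementation T of interface IF; DefineImplementation(IF,T,...) declares an
+// implementation whose constructor takes the listed argument types. Both
+// return the class name #T from info().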
+#define DefineImplementation2(IF,T) \
+ struct T : public IF { \
+ T() {} \
+ std::string info() override { \
+ return #T; \
+ } \
+ }
+
+#define DefineImplementation(IF,T,...) \
+ struct T : public IF { \
+ T(__VA_ARGS__) {} \
+ std::string info() override { \
+ return #T; \
+ } \
+ }
+
+struct InterfaceA
+{
+ virtual std::string info() = 0;
+ virtual ~InterfaceA() = default;
+};
+
+struct InterfaceB
+{
+ virtual std::string info() = 0;
+ virtual ~InterfaceB() = default;
+};
+
+template<typename Interface>
+Dune::ParameterizedObjectFactory<std::unique_ptr<Interface>(int)> &
+globalPtrFactory()
+{
+ return Dune::Singleton<Dune::ParameterizedObjectFactory<std::unique_ptr<Interface>(int)>>::instance();
+}
+
+#endif //#ifndef DUNE_COMMON_TEST_PARAMETERIZEDOBJECTFACTORYSINGLETON_HH
--- /dev/null
+#include "config.h"
+#include <cassert>
+#include <functional>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <dune/common/parametertree.hh>
+#include <dune/common/shared_ptr.hh>
+
+#include "parameterizedobjectfactorysingleton.hh"
+
+DefineImplementation(InterfaceA, Ai, int);
+DefineImplementation(InterfaceA, Bi, int);
+DefineImplementation2(InterfaceA, Ax);
+DefineImplementation2(InterfaceA, Bx);
+DefineImplementation(InterfaceA, Ad, const Dune::ParameterTree&);
+DefineImplementation(InterfaceA, Bd, Dune::ParameterTree);
+DefineImplementation(InterfaceB, Ais, int, std::string);
+DefineImplementation(InterfaceB, Bis, int, std::string);
+
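+// CheckInstance*(F,T,...) create an object registered as "T" in factory F and
+// assert that its info() method returns exactly that type name.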
+#define CheckInstance2(F,T) \
+ assert(#T == F.create(#T)->info())
+
+#define CheckInstance(F,T,...) \
+ assert(#T == F.create(#T,##__VA_ARGS__)->info())
+
+struct AImp : public InterfaceA
+{
+ AImp(std::string s) :
+ s_(s)
+ {}
+
+ AImp(const AImp& /*other*/) :
+ s_("copied")
+ {}
+
+ std::string info() override
+ {
+ return s_;
+ }
+ std::string s_;
+};
+
+int main()
+{
+ // int as parameter
+ // Dune::ParameterizedObjectFactory<std::unique_ptr<InterfaceA>(int)> FactoryA;
+ globalPtrFactory<InterfaceA>().define<Ai>("Ai");
+ globalPtrFactory<InterfaceA>().define<Bi>("Bi");
+ globalPtrFactory<InterfaceA>().define("Ax", [](int /*i*/) { return std::make_unique<Ax>(); });
+ CheckInstance(globalPtrFactory<InterfaceA>(), Ai, 0);
+ CheckInstance(globalPtrFactory<InterfaceA>(), Bi, 1);
+ CheckInstance(globalPtrFactory<InterfaceA>(), Ax, 1);
+ // int as parameter for external factory
+ CheckInstance(globalPtrFactory<InterfaceA>(), Aix, 0);
+ CheckInstance(globalPtrFactory<InterfaceA>(), Bix, 1);
+
+ // default constructor
+ Dune::ParameterizedObjectFactory<std::shared_ptr<InterfaceA>()> FactoryAd;
+ FactoryAd.define<Ax>("Ax");
+ FactoryAd.define<Bx>("Bx");
+ FactoryAd.define("Ai", []() { return std::make_shared<Ai>(0); });
+ AImp aimp("onStack");
+ FactoryAd.define("AImp", [&]() { return Dune::stackobject_to_shared_ptr<AImp>(aimp); });
+ FactoryAd.define("AImp2", Dune::stackobject_to_shared_ptr<AImp>(aimp));
+ FactoryAd.define("AImp3", std::make_shared<AImp>("shared"));
+ Dune::ParameterTree param;
+ CheckInstance2(FactoryAd, Ax);
+ CheckInstance2(FactoryAd, Bx);
+ CheckInstance2(FactoryAd, Ai);
+ std::cout << FactoryAd.create("AImp")->info() << std::endl;
+ std::cout << FactoryAd.create("AImp2")->info() << std::endl;
+ std::cout << FactoryAd.create("AImp3")->info() << std::endl;
+
+ // explicitly request the default constructor
+ Dune::ParameterizedObjectFactory<std::unique_ptr<InterfaceA>()> FactoryAx;
+ FactoryAx.define<Ax>("Ax");
+ FactoryAx.define<Bx>("Bx");
+ CheckInstance2(FactoryAx, Ax);
+ CheckInstance2(FactoryAx, Bx);
+
+ // multiple parameters
+ Dune::ParameterizedObjectFactory<std::unique_ptr<InterfaceB>(int, std::string)> FactoryB;
+ FactoryB.define<Ais>("Ais");
+ FactoryB.define<Bis>("Bis");
+ CheckInstance(FactoryB, Ais, 0, std::to_string(2));
+ CheckInstance(FactoryB, Bis, 1, "Hallo");
+
+ // check for ambiguous overloads
+ Dune::ParameterizedObjectFactory<bool()> FactoryBool;
+ FactoryBool.define("true",true);
+ FactoryBool.define("false",[](){return false;});
+
+ // value semantics
+ Dune::ParameterizedObjectFactory<std::function<double(double)>(int)> FactoryC;
+ FactoryC.define("fi", [](int i) {
+ return [=](double x) { return x+i;};
+ });
+ FactoryC.define("fi1", [](int i) {
+ return [=](double x) { return x+i+1;};
+ });
+ assert(FactoryC.create("fi", 42)(0) == 42);
+ assert(FactoryC.create("fi1", 42)(0) == 43);
+
+}
--- /dev/null
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <cstdlib>
+#include <iostream>
+#include <locale>
+#include <ostream>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/parametertree.hh>
+
+// This assert macro does not depend on the value of NDEBUG
+#define check_assert(expr) \
+ do \
+ { \
+ if(!(expr)) \
+ { \
+ std::cerr << __FILE__ << ":" << __LINE__ << ": check_assert(" \
+ << #expr << ") failed" << std::endl; \
+ std::abort(); \
+ } \
+ } while(false)
+
+// Check that the given expression throws the given exception
+#define check_throw(expr, except) \
+ do { \
+ try { \
+ expr; \
+ std::cerr << __FILE__ << ":" << __LINE__ << ": " << #expr \
+ << " should throw " << #except << std::endl; \
+ std::abort(); \
+ } \
+ catch(const except&) {} \
+ catch(...) { \
+ std::cerr << __FILE__ << ":" << __LINE__ << ": " << #expr \
+ << " should throw " << #except << std::endl; \
+ std::abort(); \
+ } \
+ } while(false)
+
+// globally set a locale that uses "," as the decimal separator.
+// return false if no such locale is installed on the system
+bool setCommaLocale()
+{
+ static char const* const commaLocales[] = {
+ "de", "de@euro", "de.UTF-8",
+ "de_AT", "de_AT@euro", "de_AT.UTF-8",
+ "de_BE", "de_BE@euro", "de_BE.UTF-8",
+ "de_CH", "de_CH@euro", "de_CH.UTF-8",
+ "de_DE", "de_DE@euro", "de_DE.UTF-8",
+ "de_LI", "de_LI@euro", "de_LI.UTF-8",
+ "de_LU", "de_LU@euro", "de_LU.UTF-8",
+ NULL
+ };
+ for(char const* const* loc = commaLocales; *loc; ++loc)
+ {
+ try {
+ std::locale::global(std::locale(*loc));
+ std::cout << "Using comma-locale " << std::locale().name() << std::endl;
+ return true;
+ }
+ catch(const std::runtime_error&) { }
+ }
+
+ std::cout << "No comma-using locale found on system, tried the following:";
+ std::string sep = " ";
+ for(char const* const* loc = commaLocales; *loc; ++loc)
+ {
+ std::cout << sep << *loc;
+ sep = ", ";
+ }
+ std::cout << std::endl;
+ return false;
+}
+
+int main()
+{
+ if(!setCommaLocale())
+ {
+ std::cerr << "No locale using comma as decimal seperator found on system"
+ << std::endl;
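+ // exit code 77 conventionally tells the test harness that the test was skipped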
+ return 77;
+ }
+ { // Try with comma
+ Dune::ParameterTree ptree;
+ check_throw((ptree["setting"] = "42,42",
+ ptree.get<double>("setting")),
+ Dune::RangeError);
+ check_throw((ptree["setting"] = "42 2,5",
+ ptree.get<Dune::FieldVector<double, 2> >("setting")),
+ Dune::RangeError);
+ check_throw((ptree["setting"] = "42 2,5",
+ ptree.get<std::vector<double> >("setting")),
+ Dune::RangeError);
+ }
+ { // Try with point
+ Dune::ParameterTree ptree;
+ check_assert((ptree["setting"] = "42.42",
+ ptree.get<double>("setting") == 42.42));
+ check_assert((ptree["setting"] = "42 2.5",
+ ptree.get<Dune::FieldVector<double, 2> >("setting")
+ == Dune::FieldVector<double, 2>{42.0, 2.5}));
+ check_assert((ptree["setting"] = "42 2.5",
+ ptree.get<std::vector<double> >("setting")
+ == std::vector<double>{42.0, 2.5}));
+ }
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <array>
+#include <complex>
+#include <cstdlib>
+#include <iostream>
+#include <ostream>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/fvector.hh>
+#include <dune/common/parametertree.hh>
+#include <dune/common/parametertreeparser.hh>
+
+// This assert macro does not depend on the value of NDEBUG
+#define check_assert(expr) \
+ do \
+ { \
+ if(!(expr)) \
+ { \
+ std::cerr << __FILE__ << ":" << __LINE__ << ": check_assert(" \
+ << #expr << ") failed" << std::endl; \
+ std::abort(); \
+ } \
+ } while(false)
+
+// Check that the given expression throws the given exception
+#define check_throw(expr, except) \
+ do { \
+ try { \
+ expr; \
+ std::cerr << __FILE__ << ":" << __LINE__ << ": " << #expr \
+ << " should throw " << #except << std::endl; \
+ std::abort(); \
+ } \
+ catch(const except&) {} \
+ catch(...) { \
+ std::cerr << __FILE__ << ":" << __LINE__ << ": " << #expr \
+ << " should throw " << #except << std::endl; \
+ std::abort(); \
+ } \
+ } while(false)
+
+template<class P>
+void testparam(const P & p)
+{
+ // try accessing key
+ check_assert(p.template get<int>("x1") == 1);
+ check_assert(p.template get<double>("x1") == 1.0);
+ check_assert(p.template get<std::string>("x2") == "hallo");
+ check_assert(p.template get<bool>("x3") == false);
+ // try reading array like structures
+ std::vector<unsigned int>
+ array1 = p.template get< std::vector<unsigned int> >("array");
+ std::array<unsigned int, 8>
+ array2 = p.template get< std::array<unsigned int, 8> >("array");
+ Dune::FieldVector<unsigned int, 8>
+ array3 = p.template get< Dune::FieldVector<unsigned int, 8> >("array");
+ check_assert(array1.size() == 8);
+ for (unsigned int i=0; i<8; i++)
+ {
+ check_assert(array1[i] == i+1);
+ check_assert(array2[i] == i+1);
+ check_assert(array3[i] == i+1);
+ }
+ // try accessing subtree
+ p.sub("Foo");
+ p.sub("Foo").template get<std::string>("peng");
+ // check hasSub and hasKey
+ check_assert(p.hasSub("Foo"));
+ check_assert(!p.hasSub("x1"));
+ check_assert(p.hasKey("x1"));
+ check_assert(!p.hasKey("Foo"));
+ // try accessing inexistent key
+ try {
+ p.template get<int>("bar");
+ DUNE_THROW(Dune::Exception, "failed to detect missing key");
+ }
+ catch (Dune::RangeError & r) {}
+ // try accessing inexistent subtree in throwing mode
+ try {
+ p.sub("bar",true);
+ DUNE_THROW(Dune::Exception, "failed to detect missing subtree");
+ }
+ catch (Dune::RangeError & r) {}
+ // try accessing inexistent nested subtree in throwing mode
+ try {
+ p.sub("Foo.Zoo",true);
+ DUNE_THROW(Dune::Exception, "failed to detect missing nested subtree");
+ }
+ catch (Dune::RangeError & r) {}
+ // try accessing inexistent subtree in non-throwing mode
+ p.sub("bar");
+ // try accessing inexistent subtree that shadows a value key
+ try {
+ p.sub("x1.bar");
+ DUNE_THROW(Dune::Exception, "succeeded to access non-existent subtree that shadows a value key");
+ }
+ catch (Dune::RangeError & r) {}
+ // try accessing key as subtree
+ try {
+ p.sub("x1");
+ DUNE_THROW(Dune::Exception, "succeeded to access key as subtree");
+ }
+ catch (Dune::RangeError & r) {}
+ // try accessing subtree as key
+ try {
+ p.template get<double>("Foo");
+ DUNE_THROW(Dune::Exception, "succeeded to access subtree as key");
+ }
+ catch (Dune::RangeError & r) {}
+}
+
+template<class P>
+void testmodify(P parameterSet)
+{
+ parameterSet["testDouble"] = "3.14";
+ parameterSet["testInt"] = "42";
+ parameterSet["testString"] = "Hallo Welt!";
+ parameterSet["testVector"] = "2 3 5 7 11";
+ parameterSet.sub("Foo")["bar"] = "2";
+
+ double testDouble = parameterSet.template get<double>("testDouble");
+ int testInt = parameterSet.template get<int>("testInt");
+ ++testDouble;
+ ++testInt;
+ std::string testString = parameterSet.template get<std::string>("testString");
+ typedef Dune::FieldVector<unsigned, 5> FVector;
+ FVector testFVector = parameterSet.template get<FVector>("testVector");
+ typedef std::vector<unsigned> SVector;
+ SVector testSVector = parameterSet.template get<SVector>("testVector");
+ if(testSVector.size() != 5)
+ DUNE_THROW(Dune::Exception, "Testing std::vector<unsigned>: expected "
+ "size()==5, got size()==" << testSVector.size());
+ for(unsigned i = 0; i < 5; ++i)
+ if(testFVector[i] != testSVector[i])
+ DUNE_THROW(Dune::Exception,
+ "testFVector[" << i << "]==" << testFVector[i] << " but "
+ "testSVector[" << i << "]==" << testSVector[i]);
+ if (parameterSet.template get<std::string>("Foo.bar") != "2")
+ DUNE_THROW(Dune::Exception, "Failed to write subtree entry");
+ if (parameterSet.sub("Foo").template get<std::string>("bar") != "2")
+ DUNE_THROW(Dune::Exception, "Failed to write subtree entry");
+}
+
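+// Parse the given argument vector with readNamedOptions, compare the produced
+// error message against referr (empty string means "no error expected"), and
+// check the resulting "foo"/"bar" entries against the expected values.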
+void testOptionsParserResults(std::vector<std::string> args,
+ const std::vector<std::string> & keywords,
+ unsigned int required,
+ bool allow_more,
+ bool overwrite,
+ std::string foo, std::string bar,
+ const std::string referr = "")
+{
+ Dune::ParameterTree pt;
+ try {
+ char * argv[10];
+ for (std::size_t i = 0; i < args.size(); ++i)
+ argv[i] = &args[i][0];
+ Dune::ParameterTreeParser::readNamedOptions(args.size(), argv, pt, keywords, required, allow_more, overwrite);
+ check_assert(referr == "");
+ }
+ catch (const Dune::ParameterTreeParserError & e)
+ {
+ std::string err = e.what();
+ std::size_t offset = err.find("]: ");
+ err = err.substr(offset + 3, err.find('\n') - offset - 3);
+ check_assert(referr == err);
+ }
+ if (foo != "" && foo != pt.get<std::string>("foo"))
+ DUNE_THROW(Dune::Exception, "Options parser failed... foo = "
+ << pt.get<std::string>("foo") << " != " << foo);
+ if (bar != "" && bar != pt.get<std::string>("bar"))
+ DUNE_THROW(Dune::Exception, "Options parser failed... bar = "
+ << pt.get<std::string>("bar") << " != " << bar);
+}
+
+void testOptionsParser()
+{
+ std::vector<std::string> keywords = { "foo", "bar" };
+ // check normal behaviour
+ {
+ std::vector<std::string> args = { "progname", "--bar=ligapokal", "peng", "--bar=ligapokal", "--argh=other"};
+ testOptionsParserResults(args,keywords,keywords.size(),true,true,"peng","ligapokal",
+ "" /* no error */ );
+ }
+ // bail out on overwrite
+ {
+ std::vector<std::string> args = { "progname", "--bar=ligapokal", "peng", "--bar=ligapokal", "--argh=other"};
+ testOptionsParserResults(args,keywords,keywords.size(),true,false,"peng","ligapokal",
+ "parameter bar already specified");
+ }
+ // bail out on unknown options
+ {
+ std::vector<std::string> args = { "progname", "--bar=ligapokal", "peng", "--bar=ligapokal", "--argh=other"};
+ testOptionsParserResults(args,keywords,keywords.size(),false,true,"peng","ligapokal",
+ "unknown parameter argh");
+ }
+ // bail out on missing parameter
+ {
+ std::vector<std::string> args = { "progname", "--bar=ligapokal"};
+ testOptionsParserResults(args,keywords,keywords.size(),true,true,"","ligapokal",
+ "missing parameter(s) ... foo");
+ }
+ // check optional parameter
+ {
+ std::vector<std::string> args = { "progname", "--foo=peng"};
+ testOptionsParserResults(args,keywords,1,true,true,"peng","",
+ "" /* no error */);
+ }
+ // check optional parameter, but bail out on missing parameter
+ {
+ std::vector<std::string> args = { "progname", "--bar=ligapokal"};
+ testOptionsParserResults(args,keywords,1,true,true,"","ligapokal",
+ "missing parameter(s) ... foo");
+ }
+ // bail out on too many parameters
+ {
+ std::vector<std::string> args = { "progname", "peng", "ligapokal", "hurz"};
+ testOptionsParserResults(args,keywords,keywords.size(),true,true,"peng","ligapokal",
+ "superfluous unnamed parameter");
+ }
+ // bail out on missing value
+ {
+ std::vector<std::string> args = { "progname", "--foo=peng", "--bar=ligapokal", "--hurz"};
+ testOptionsParserResults(args,keywords,keywords.size(),true,true,"peng","ligapokal",
+ "value missing for parameter --hurz");
+ }
+}
+
+void testFS1527()
+{
+ { // Check that junk at the end is not accepted (int)
+ Dune::ParameterTree ptree;
+ check_throw(ptree["setting"] = "0.5"; ptree.get("setting", 0),
+ Dune::RangeError);
+ }
+ { // Check that junk at the end is not accepted (double)
+ Dune::ParameterTree ptree;
+ check_throw(ptree["setting"] = "0.5 junk"; ptree.get("setting", 0.0),
+ Dune::RangeError);
+ }
+}
+
+// check that negative values can be given on the command line
+void testFS1523()
+{
+ static char arg0[] = "progname";
+ static char arg1[] = "-setting";
+ static char arg2[] = "-1";
+ static char *argv[] = { arg0, arg1, arg2, NULL };
+ int argc = sizeof argv / sizeof (char *) - 1;
+
+ Dune::ParameterTree ptree;
+ Dune::ParameterTreeParser::readOptions(argc, argv, ptree);
+
+ check_assert(ptree.get<int>("setting") == -1);
+}
+
+void check_recursiveTreeCompare(const Dune::ParameterTree & p1,
+ const Dune::ParameterTree & p2)
+{
+ check_assert(p1.getValueKeys() == p2.getValueKeys());
+ check_assert(p1.getSubKeys() == p2.getSubKeys());
+ typedef Dune::ParameterTree::KeyVector::const_iterator Iterator;
+ for (Iterator it = p1.getValueKeys().begin();
+ it != p1.getValueKeys().end(); ++it)
+ check_assert(p1[*it] == p2[*it]);
+ for (Iterator it = p1.getSubKeys().begin();
+ it != p1.getSubKeys().end(); ++it)
+ check_recursiveTreeCompare(p1.sub(*it), p2.sub(*it));
+}
+
+// test report method and read back in
+void testReport()
+{
+ std::stringstream s;
+ s << "foo.i = 1 \n foo.bar.peng = hurz";
+ Dune::ParameterTree ptree;
+ Dune::ParameterTreeParser::readINITree(s, ptree);
+
+ std::stringstream s2;
+ ptree.report(s2);
+ Dune::ParameterTree ptree2;
+ Dune::ParameterTreeParser::readINITree(s2, ptree2);
+ check_recursiveTreeCompare(ptree, ptree2);
+}
+
+int main()
+{
+ try {
+ // read config
+ std::stringstream s;
+ s << "x1 = 1 # comment\n"
+ << "x2 = hallo\n"
+ << "x3 = no\n"
+ << "array = 1 2 3 4 5\t6 7 8\n"
+ << "\n"
+ << "[Foo] # another comment\n"
+ << "peng = ligapokal\n";
+
+ auto c = Dune::ParameterTreeParser::readINITree(s);
+
+ // test modifying and reading
+ testmodify<Dune::ParameterTree>(c);
+ try {
+ c.get<int>("testInt");
+ DUNE_THROW(Dune::Exception, "unexpected shallow copy of ParameterTree");
+ }
+ catch (Dune::RangeError & r) {}
+
+ // test for complex
+ c.get<std::complex<double>>("x1");
+
+ // more const tests
+ testparam<Dune::ParameterTree>(c);
+
+ // check the command line parser
+ testOptionsParser();
+
+ // check report
+ testReport();
+
+ // check for specific bugs
+ testFS1527();
+ testFS1523();
+ }
+ catch (Dune::Exception & e)
+ {
+ std::cout << e << std::endl;
+ return 1;
+ }
+ return 0;
+}
--- /dev/null
+#include <config.h>
+#include <cassert>
+#include <iostream>
+#include <vector>
+
+#if ! HAVE_PARMETIS
+#error "ParMETIS is required for this test."
+#endif
+
+#include <mpi.h>
+
+#if HAVE_PTSCOTCH_PARMETIS
+extern "C" {
+ #include <ptscotch.h>
+}
+#endif
+
+extern "C" {
+ #include <parmetis.h>
+}
+
+int main(int argc, char **argv)
+{
+#if defined(REALTYPEWIDTH)
+ using real_t = ::real_t;
+#else
+ using real_t = float;
+#endif
+
+#if defined(IDXTYPEWIDTH)
+ using idx_t = ::idx_t;
+#elif HAVE_PTSCOTCH_PARMETIS
+ using idx_t = SCOTCH_Num;
+#else
+ using idx_t = int;
+#endif
+
+ MPI_Init(&argc, &argv);
+
+ MPI_Comm comm;
+ MPI_Comm_dup(MPI_COMM_WORLD, &comm);
+
+ int rank, size;
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+
+ // This test is designed to run on exactly 3 MPI processes
+ assert(size == 3);
+
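+ // The test graph is a structured 5x3 grid (15 vertices); each rank owns one
+ // row of 5 vertices, given below in distributed CSR form (vtxdist/xadj/adjncy).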
+ // local adjacency structure of the graph
+ std::vector<idx_t> xadj; // size n+1
+ std::vector<idx_t> adjncy; // size 2*m
+
+ if (rank == 0) {
+ xadj = std::vector<idx_t>{0,2,5,8,11,13};
+ adjncy = std::vector<idx_t>{1,5,0,2,6,1,3,7,2,4,8,3,9};
+ }
+ else if (rank == 1) {
+ xadj = std::vector<idx_t>{0,3,7,11,15,18};
+ adjncy = std::vector<idx_t>{0,6,10,1,5,7,11,2,6,8,12,3,7,9,13,4,8,14};
+ }
+ else if (rank == 2) {
+ xadj = std::vector<idx_t>{0,2,5,8,11,13};
+ adjncy = std::vector<idx_t>{5,11,6,10,12,7,11,13,8,12,14,9,13};
+ }
+
+ // Array describing how the vertices of the graph are distributed among the processors.
+ std::vector<idx_t> vtxdist{0,5,10,15};
+
+ // No weights
+ idx_t wgtflag = 0;
+ // C-style numbering that starts from 0.
+ idx_t numflag = 0;
+ // Number of weights that each vertex has
+ idx_t ncon = 1;
+ // Number of sub-domains
+ idx_t nparts = size;
+ // Fraction of vertex weight that should be distributed to each sub-domain for each
+ // balance constraint
+ std::vector<real_t> tpwgts(ncon * nparts, 1.0/nparts);
+ std::vector<real_t> ubvec(ncon, 1.05);
+ std::vector<idx_t> options{0, 0, 0};
+
+ idx_t edgecut;
+ std::vector<idx_t> part(xadj.size()-1, 0);
+
+ ParMETIS_V3_PartKway(vtxdist.data(), xadj.data(), adjncy.data(),
+ nullptr, nullptr, &wgtflag, &numflag, &ncon, &nparts, tpwgts.data(),
+ ubvec.data(), options.data(), &edgecut, part.data(), &comm);
+
+ for (std::size_t part_i = 0; part_i < part.size(); ++part_i) {
+ std::cout << "[" << rank << "] " << part_i << " => " << part[part_i] << std::endl;
+ }
+
+ MPI_Finalize();
+ return 0;
+}
\ No newline at end of file
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <iostream>
+#include <ostream>
+#include <string>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/ios_state.hh>
+#include <dune/common/path.hh>
+
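+// Exit-code bookkeeping: code starts at 77 ("skipped"), the first successful
+// check turns it into 0, and any failed check sets it to 1.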
+void setCode(int& code, bool status) {
+ if(!status)
+ code = 1;
+ else
+ if(code == 77)
+ code = 0;
+}
+
+void concatPathsTests(int& code) {
+ typedef const char* const triple[3];
+ static const triple data[] = {
+ {"a" , "b" , "a/b" },
+ {"/a", "b" , "/a/b"},
+ {"a/", "b" , "a/b" },
+ {"a" , "b/", "a/b/"},
+ {"..", "b" , "../b"},
+ {"a" , "..", "a/.."},
+ {"." , "b" , "./b" },
+ {"a" , "." , "a/." },
+ {"" , "b" , "b" },
+ {"a" , "" , "a" },
+ {"" , "" , "" },
+ {NULL, NULL, NULL }
+ };
+ for(const triple* p = data; (*p)[0] != NULL; ++p) {
+ const std::string& result = Dune::concatPaths((*p)[0], (*p)[1]);
+ bool success = result == (*p)[2];
+ setCode(code, success);
+ if(!success)
+ std::cerr << "concatPaths(\"" << (*p)[0] << "\", "
+ << "\"" << (*p)[1] << "\"): got \"" << result << "\", "
+ << "expected \"" << (*p)[2] << "\"" << std::endl;
+ }
+}
+
+void processPathTests(int& code) {
+ typedef const char* const pair[3];
+ static const pair data[] = {
+ {"" , "" },
+ {"." , "" },
+ {"./" , "" },
+ {"a/.." , "" },
+ {".." , "../" },
+ {"../a" , "../a/"},
+ {"a" , "a/" },
+ {"a//" , "a/" },
+ {"a///b" , "a/b/" },
+ {"/" , "/" },
+ {"/." , "/" },
+ {"/.." , "/" },
+ {"/a/.." , "/" },
+ {"/a" , "/a/" },
+ {"/a/" , "/a/" },
+ {"/../a/", "/a/" },
+ {NULL , NULL }
+ };
+ for(const pair* p = data; (*p)[0] != NULL; ++p) {
+ const std::string& result = Dune::processPath((*p)[0]);
+ bool success = result == (*p)[1];
+ setCode(code, success);
+ if(!success)
+ std::cerr << "processPath(\"" << (*p)[0] << "\"): got "
+ << "\"" << result << "\", expected "
+ << "\"" << (*p)[1] << "\"" << std::endl;
+ }
+}
+
+void prettyPathTests(int& code) {
+ struct triple {
+ const char* p;
+ bool isDir;
+ const char* result;
+ };
+ static const triple data[] = {
+ {"" , true , "." },
+ {"" , false, "." },
+ {"." , true , "." },
+ {"." , false, "." },
+ {"./" , true , "." },
+ {"./" , false, "." },
+ {"a/.." , true , "." },
+ {"a/.." , false, "." },
+ {".." , true , ".." },
+ {".." , false, ".." },
+ {"../a" , true , "../a/"},
+ {"../a" , false, "../a" },
+ {"a" , true , "a/" },
+ {"a" , false, "a" },
+ {"a//" , true , "a/" },
+ {"a//" , false, "a" },
+ {"a///b" , true , "a/b/" },
+ {"a///b" , false, "a/b" },
+ {"/" , true , "/" },
+ {"/" , false, "/" },
+ {"/." , true , "/" },
+ {"/." , false, "/" },
+ {"/.." , true , "/" },
+ {"/.." , false, "/" },
+ {"/a/.." , true , "/" },
+ {"/a/.." , false, "/" },
+ {"/a" , true , "/a/" },
+ {"/a" , false, "/a" },
+ {"/a/" , true , "/a/" },
+ {"/a/" , false, "/a" },
+ {"/../a/", true , "/a/" },
+ {"/../a/", false, "/a" },
+ {NULL, false, NULL }
+ };
+
+ Dune::ios_base_all_saver state(std::cerr);
+ std::cerr << std::boolalpha;
+
+ for(const triple* p = data; p->p != NULL; ++p) {
+ const std::string& result = Dune::prettyPath(p->p, p->isDir);
+ bool success = result == p->result;
+ setCode(code, success);
+ if(!success)
+ std::cerr << "prettyPath(\"" << p->p << "\", " << p->isDir << "): got "
+ << "\"" << result << "\", expected \"" << p->result << "\""
+ << std::endl;
+ }
+}
+
+void relativePathTests(int& code) {
+ typedef const char* const triple[3];
+ static const triple data[] = {
+ {"" , "" , "" },
+ {"" , "b" , "b/" },
+ {"" , "..", "../" },
+ {"a" , "" , "../" },
+ {"a" , "b" , "../b/"},
+ {"/" , "/" , "" },
+ {"/a", "/" , "../" },
+ {"/" , "/b", "b/" },
+ {"/a", "/b", "../b/"},
+ {NULL, NULL, NULL }
+ };
+
+ for(const triple* p = data; (*p)[0] != NULL; ++p) {
+ const std::string& result = Dune::relativePath((*p)[0], (*p)[1]);
+ bool success = result == (*p)[2];
+ setCode(code, success);
+ if(!success)
+ std::cerr << "relativePath(\"" << (*p)[0] << "\", "
+ << "\"" << (*p)[1] << "\"): got \"" << result << "\", "
+ << "expected \"" << (*p)[2] << "\"" << std::endl;
+ }
+
+ typedef const char* const pair[2];
+ static const pair except_data[] = {
+ {"" , "/" },
+ {"a" , "/" },
+ {"/" , "" },
+ {"/" , "b" },
+ {"..", "" },
+ {NULL, NULL}
+ };
+
+ for(const pair* p = except_data; (*p)[0] != NULL; ++p) {
+ std::string result;
+ try {
+ result = Dune::relativePath((*p)[0], (*p)[1]);
+ }
+ catch(const Dune::NotImplemented&) {
+ setCode(code, true);
+ continue;
+ }
+ setCode(code, false);
+ std::cerr << "relativePath(\"" << (*p)[0] << "\", "
+ << "\"" << (*p)[1] << "\"): got \"" << result << "\", "
+ << "expected exception thrown" << std::endl;
+ }
+}
+
+int main ()
+{
+ try {
+ int code = 77;
+
+ concatPathsTests(code);
+ processPathTests(code);
+ prettyPathTests(code);
+ relativePathTests(code);
+
+ return code;
+ }
+ catch(const Dune::Exception& e) {
+ std::cerr << "Exception thrown: " << e << std::endl;
+ throw;
+ }
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <cassert>
+#include <cstdint>
+#include <iostream>
+#include <typeinfo>
+#include <vector>
+
+#include <dune/common/poolallocator.hh>
+#include <dune/common/fmatrix.hh>
+
+using namespace Dune;
+
+struct UnAligned
+{
+ char t;
+ char s;
+ char k;
+};
+
+
+
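+// Allocate ten chunks worth of elements from a Pool<T,size>, verify that every
+// returned address lies within the bounds of the current chunk and that
+// consecutive allocations do not overlap, then free everything again.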
+template<std::size_t size, typename T>
+struct testPoolMain
+{
+ static int test()
+ {
+ int ret=0;
+
+ Pool<T,size> pool;
+
+ int elements = Pool<T,size>::elements;
+ //int poolSize = Pool<T,size>::size;
+ //int chunkSize = Pool<T,size>::chunkSize;
+ //int alignedSize = Pool<T,size>::alignedSize;
+
+ std::vector<std::uintptr_t> oelements(10*elements);
+
+ typedef typename Pool<T,size>::Chunk Chunk;
+
+ //Fill 10 chunks
+ for(int chunk=0; chunk < 10; ++chunk) {
+ //std::cout<< std::endl<<"Chunk "<<chunk<<" ";
+ std::uintptr_t element = reinterpret_cast<std::uintptr_t>(pool.allocate());
+ //void* celement = reinterpret_cast<void*>(element);
+ //std::cout << element<<" "<< celement<<", "<<std::endl;
+
+ Chunk* currentChunk = pool.chunks_;
+
+ assert(element==reinterpret_cast<std::uintptr_t>(currentChunk->chunk_));
+ std::uintptr_t end = reinterpret_cast<std::uintptr_t>(currentChunk->chunk_)+Pool<T,size>::chunkSize;
+
+ if(element< reinterpret_cast<std::uintptr_t>(currentChunk->chunk_))
+ {
+ std::cerr <<" buffer overflow during first alloc: "<<reinterpret_cast<std::uintptr_t>(currentChunk->chunk_)
+ <<">"<<element<<"+"<<sizeof(T)<<std::endl;
+ return ++ret;
+ }
+
+ if(end < element + sizeof(T)) {
+ std::cerr <<" buffer overflow during first alloc: "<<end<<"<"<<element<<"+"<<sizeof(T)<<std::endl;
+ return ++ret;
+ }
+
+ oelements[chunk*elements]=element;
+
+ for(int i=1; i < elements; i++)
+ {
+ element = reinterpret_cast<std::uintptr_t>(pool.allocate());
+ //celement = reinterpret_cast<void*>(element);
+ //std::cout << element<<" "<<celement<<", "<<std::endl;
+
+ if(element< reinterpret_cast<std::uintptr_t>(currentChunk->chunk_)) {
+ std::cerr <<" buffer underflow during first alloc: "<<reinterpret_cast<std::uintptr_t>(currentChunk->chunk_)
+ <<">"<<element<<"+"<<sizeof(T)<<std::endl;
+ return ++ret;
+ }
+
+ if(end < element + sizeof(T)) {
+ std::cerr <<" buffer overflow during "<<i<<" alloc: "<<end<<"<"<<element+sizeof(T)<<std::endl;
+ return ++ret;
+
+ }
+
+ if(oelements[chunk*elements+i-1]+sizeof(T)>element) {
+ std::cerr<<"allocated elements overlap!"<<std::endl;
+ return ++ret;
+ }
+
+ oelements[chunk*elements+i]=element;
+ }
+ }
+
+
+
+ for(int i=0; i < elements*10; ++i)
+ pool.free(reinterpret_cast<void*>(oelements[i]));
+
+ return ret;
+ }
+};
+
+template<typename T>
+int testPool()
+{
+ const std::size_t size = sizeof(T)>=2 ? sizeof(T)-2 : 0;
+
+ int ret=0;
+
+ std::cout<<"Checking "<<typeid(T).name()<<" sizeof="<<sizeof(T)<<" with size "<< size<<
+ " alignment="<< alignof(T) <<std::endl;
+
+ ret += testPoolMain<0,T>::test();
+ ret += testPoolMain<size,T>::test();
+ ret += testPoolMain<5*size,T>::test();
+ ret += testPoolMain<11*size,T>::test();
+ ret += testPoolMain<33*size,T>::test();
+
+ return ret;
+}
+
+int testPoolAllocator()
+{
+ int ret=0;
+ PoolAllocator<double,10> pool;
+ double *d=pool.allocate(1);
+ PoolAllocator<float,5> pool1=pool;
+ PoolAllocator<double,10> pool2=pool;
+ try
+ {
+ pool2.deallocate(d,1);
+#ifndef NDEBUG
+ ++ret;
+ std::cerr<<"ERROR: deallocation should not work with copied allocators."<<std::endl;
+#endif
+ }
+ catch(const std::bad_alloc&)
+ {}
+ pool1.allocate(1);
+ double *d1=pool2.allocate(1);
+ pool.deallocate(d,1);
+ pool2.deallocate(d1,1);
+ pool2.allocate(1);
+ return ret;
+}
+int main(int, char **)
+{
+ int ret=0;
+
+ ret += testPool<int>();
+
+ ret+= testPool<double>();
+
+ ret+= testPool<char>();
+
+ ret += testPool<Dune::FieldMatrix<double,10,10> >();
+
+ ret+=testPoolAllocator();
+
+ std::cout<< alignof(UnAligned) <<" "<<sizeof(UnAligned)<<std::endl;
+
+ ret += testPool<UnAligned>();
+
+ return ret;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#include <config.h>
+
+#include <iostream>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/math.hh>
+#include <dune/common/power.hh>
+
+using namespace Dune;
+
+int main (int argc, char** argv) try
+{
+ // Zero and positive powers
+ if (power(4,0) != 1)
+ DUNE_THROW(MathError, "power(4,0) does not compute the correct result");
+
+ if (power(4,1) != 4)
+ DUNE_THROW(MathError, "power(4,1) implementation does not compute the correct result");
+
+ if (power(4,2) != 16)
+ DUNE_THROW(MathError, "power(4,2) implementation does not compute the correct result");
+
+ if (power(4,3) != 64)
+ DUNE_THROW(MathError, "power(4,3) implementation does not compute the correct result");
+
+ // Negative powers
+ if (power(4.0,-1) != 0.25)
+ DUNE_THROW(MathError, "power(4,-1) implementation does not compute the correct result");
+
+ if (power(4.0,-2) != 0.0625)
+ DUNE_THROW(MathError, "power(4,-2) implementation does not compute the correct result");
+
+ if (power(4.0,-3) != 0.015625)
+ DUNE_THROW(MathError, "power(4,-3) implementation does not compute the correct result");
+
+ // Test whether the result can be used in a compile-time expression
+ enum { dummy = power(2,2) };
+
+ // Test legacy power implementation
+ if (Power<0>::eval(4) != 1)
+ DUNE_THROW(MathError, "Power implementation does not compute the correct result");
+
+ if (Power<1>::eval(4) != 4)
+ DUNE_THROW(MathError, "Power implementation does not compute the correct result");
+
+ if (Power<2>::eval(4) != 16)
+ DUNE_THROW(MathError, "Power implementation does not compute the correct result");
+
+ if (Power<3>::eval(4) != 64)
+ DUNE_THROW(MathError, "Power implementation does not compute the correct result");
+
+ return 0;
+}
+catch (Exception& e)
+{
+ std::cout << e.what() << std::endl;
+ return 1;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#include "config.h"
+
+#include <cassert>
+#include <iostream>
+#include <limits>
+
+#include <dune/common/fvector.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/float_cmp.hh>
+#include <dune/common/quadmath.hh>
+#include <dune/common/test/testsuite.hh>
+
+using namespace Dune;
+
+template <class T>
+struct Comparator
+{
+ Comparator(T tol)
+ : tol_(tol)
+ {}
+
+ bool operator()(T const& x, T const& y)
+ {
+ return Dune::FloatCmp::eq<T,FloatCmp::absolute>(x, y, tol_);
+ }
+
+private:
+ T tol_;
+};
+
+int main()
+{
+ // check vector and matrix type with Float128 field type
+ TestSuite test{};
+ Comparator<Float128> cmp{std::numeric_limits<Float128>::epsilon() * 8};
+ Comparator<Float128> weakcmp{cbrt(std::numeric_limits<Float128>::epsilon())};
+
+ // implicit conversion
+ Float128 x1 = 1;
+ Float128 x2 = 1.0f;
+ Float128 x3 = 1.0;
+ Float128 x4 = 1.0l;
+
+ [[maybe_unused]] int z1 = x1;
+ [[maybe_unused]] float z2 = x2;
+ [[maybe_unused]] double z3 = x3;
+ [[maybe_unused]] long double z4 = x4;
+
+ // field-vector
+ FieldVector<Float128,3> v{1,2,3}, x;
+ FieldMatrix<Float128,3,3> M{ {1,2,3}, {2,3,4}, {3,4,6} }, A;
+ FieldMatrix<Float128,3,3> M2{ {1,2,3}, {2,3,4}, {3,4,7} };
+
+ auto y1 = v.one_norm();
+ test.check(cmp(y1, 6.q), "vec.one_norm()");
+
+ auto y2 = v.two_norm();
+ test.check(cmp(y2, sqrtq(14.q)), "vec.two_norm()");
+
+ auto y3 = v.infinity_norm();
+ test.check(cmp(y3, 3.q), "vec.infinity_norm()");
+
+ M.mv(v, x); // x = M*v
+ M.mtv(v, x); // x = M^T*v
+ M.umv(v, x); // x+= M*v
+ M.umtv(v, x); // x+= M^T*v
+ M.mmv(v, x); // x-= M*v
+ M.mmtv(v, x); // x-= M^T*v
+
+ auto w1 = M.infinity_norm();
+ test.check(cmp(w1, 13.q), "mat.infinity_norm()");
+
+ auto w2 = M.determinant();
+ test.check(cmp(w2, -1.q), "mat.determinant()");
+
+ M.solve(v, x); // x = M^(-1)*v
+
+ [[maybe_unused]] auto M3 = M.leftmultiplyany(M2);
+ [[maybe_unused]] auto M4 = M.rightmultiplyany(M2);
+
+ using namespace FMatrixHelp;
+
+ invertMatrix(M,A);
+
+ // test cmath functions for Float128 type
+ using T = Float128;
+ test.check(cmp(T(0.5), T("0.5")), "string constructor");
+
+ test.check(cmp(abs(T{-1}),T{1}), "abs");
+ test.check(cmp(fabs(T{-1}),T{1}), "fabs");
+
+ test.check(cmp(cos(acos(T{0.5})),T{0.5}), "cos(acos)");
+ test.check(cmp(cosh(acosh(T{1.5})),T{1.5}), "cosh(acosh)");
+ test.check(cmp(sin(asin(T{0.5})),T{0.5}), "sin(asin)");
+ test.check(cmp(sinh(asinh(T{0.5})),T{0.5}), "sinh(asinh)");
+ test.check(cmp(tan(atan(T{0.5})),T{0.5}), "tan(atan)");
+ test.check(cmp(atan2(T{1},T{2}), atan(T{0.5})), "atan2");
+ test.check(cmp(tanh(atanh(T{0.5})),T{0.5}), "tanh(atanh)");
+
+ test.check(cmp(fdim(T{4},T{1}),T{3}), "fdim"); // a > b ? a - b : +0
+ test.check(cmp(fma(T{0.5},T{0.4},T{1.8}),(T{0.5} * T{0.4}) + T{1.8}), "fma");
+ test.check(cmp(fmax(T{0.6},T{0.4}),T{0.6}), "fmax");
+ test.check(cmp(fmin(T{0.6},T{0.4}),T{0.4}), "fmin");
+ test.check(cmp(hypot(T{1.6}, T{2.3}), sqrt(T{1.6}*T{1.6} + T{2.3}*T{2.3})), "hypot");
+ // ilogb
+ test.check(cmp(llrint(T{2.3}),(long long int)(2)), "llrint");
+ test.check(cmp(lrint(T{2.3}),(long int)(2)), "lrint");
+ test.check(cmp(rint(T{2.3}),T{2}), "rint");
+ test.check(cmp(llround(T{2.3}),(long long int)(2)), "llround");
+ test.check(cmp(lround(T{2.3}),(long int)(2)), "lround");
+ test.check(cmp(round(T{2.3}),T{2}), "round");
+ test.check(cmp(nearbyint(T{2.3}),T{2}), "nearbyint");
+ test.check(cmp(trunc(T{2.7}),T{2}), "trunc");
+ test.check(cmp(ceil(T{1.6}),T{2}), "ceil");
+ test.check(cmp(floor(T{1.6}),T{1}), "floor");
+
+ test.check(cmp(log(exp(T{1.5})),T{1.5}), "log(exp)");
+ test.check(cmp(exp(T{0.2}+T{0.4}), exp(T{0.2})*exp(T{0.4})), "exp"); // exp(a+b) = exp(a)*exp(b)
+ test.check(cmp(expm1(T{0.6}),exp(T{0.6})-T{1}), "expm1");
+ test.check(cmp(log10(T{1000}),T{3}), "log10");
+ test.check(cmp(log2(T{8}),T{3}), "log2");
+ test.check(cmp(log1p(T{1.6}),log(T{1} + T{1.6})), "log1p");
+ // nextafter
+
+ // these two functions produce larger errors
+ test.check(weakcmp(fmod(T{5.1},T{3}),T{2.1}), "fmod");
+ test.check(weakcmp(remainder(T{5.1},T{3}),T{-0.9}), "remainder");
+
+ test.check(cmp(pow(T{2},T{3}),T{8}), "pow");
+ test.check(cmp(pow(T{M_PIq},T{3}),pow(T{M_PIq},3)), "pow"); // compare pow with float exponent and integer exponent
+ test.check(cmp(cbrt(T{0.5*0.5*0.5}),T{0.5}), "cbrt");
+ test.check(cmp(sqrt(T{4}),T{2}), "sqrt");
+
+ test.check(cmp(erf(T{0}),T{0}), "erf");
+ test.check(cmp(erfc(T{0.6}), T{1}-erf(T{0.6})), "erfc");
+ test.check(cmp(lgamma(T{3}),log(T{2})), "lgamma");
+ test.check(cmp(tgamma(T{3}),T{2}), "tgamma");
+}
--- /dev/null
+#include "config.h"
+
+#include <algorithm>
+#include <array>
+#include <map>
+#include <numeric>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include <dune/common/hybridutilities.hh>
+#include <dune/common/iteratorrange.hh>
+#include <dune/common/rangeutilities.hh>
+#include <dune/common/typetraits.hh>
+#include <dune/common/diagonalmatrix.hh>
+#include <dune/common/test/testsuite.hh>
+#include <dune/common/test/iteratortest.hh>
+
+
+template<class R>
+auto checkRangeIterators(R&& r)
+{
+ auto it = r.begin();
+ auto end = r.end();
+ auto op = [](const auto& x){};
+ return (testConstIterator(it, end, op)==0);
+}
+
+template<class R>
+auto checkRangeSize(R&& r)
+{
+ std::size_t counter = 0;
+ for([[maybe_unused]] auto&& dummy : r)
+ ++counter;
+ return (r.size()==counter);
+}
+
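+// Verify random-access iterator arithmetic by accumulating the full range and
+// the sub-ranges obtained by dropping the first and/or last element.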
+template<class R, class V>
+auto checkRandomAccessNumberRangeSums(R&& r, V sum, V first, V last)
+{
+ bool passed = true;
+ passed = passed and (std::accumulate(r.begin(), r.end(), 0) == sum);
+ passed = passed and (std::accumulate(r.begin()+1, r.end(), 0) == (sum-first));
+ passed = passed and (std::accumulate(r.begin(), r.end()-1, 0) == (sum-last));
+ passed = passed and (std::accumulate(r.begin()+1, r.end()-1, 0) == (sum-first-last));
+ return passed;
+}
+
+template<class T>
+struct is_const_reference : public std::conjunction<std::is_reference<T>, std::is_const<std::remove_reference_t<T>>>
+{};
+
+template<class T>
+struct is_mutable_reference : public std::conjunction<std::is_reference<T>, std::negation<is_const_reference<T>>>
+{};
+
+
+auto testTransformedRangeView()
+{
+ Dune::TestSuite suite("Check transformedRangeView()");
+
+ // Check transformedRangeView with container range
+ Dune::Hybrid::forEach(std::make_tuple(std::array<int,3>({1,2,3}), std::vector<int>({1,2,3})), [&](auto&& a)
+ {
+ auto a_backup = a;
+ // Pass original range by l-value, modify it, and then traverse.
+ // This should traverse the modified original range.
+ {
+ auto r = Dune::transformedRangeView(a, [](auto&& x) { return 2*x;});
+ a[0] = 2;
+ suite.check(checkRandomAccessNumberRangeSums(r, 14, 4, 6))
+ << "incorrect values in transformedRangeView of l-value";
+ suite.check(checkRangeIterators(r))
+ << "iterator test fails for transformedRangeView of l-value";
+ suite.check(checkRangeSize(r))
+ << "checking size fails for transformedRangeView of l-value";
+ a = a_backup;
+ }
+ // Pass original range by const l-value, modify it, and then traverse.
+ // This should traverse the modified original range.
+ {
+ const auto& a_const = a;
+ auto r = Dune::transformedRangeView(a_const, [](auto&& x) { return 2*x;});
+ a[0] = 2;
+ suite.check(checkRandomAccessNumberRangeSums(r, 14, 4, 6))
+ << "incorrect values in transformedRangeView of const l-value";
+ suite.check(checkRangeIterators(r))
+ << "iterator test fails for transformedRangeView of const l-value";
+ suite.check(checkRangeSize(r))
+ << "checking size fails for transformedRangeView of const l-value";
+ a = a_backup;
+ }
+ // Modify original range, pass it by r-value, restore it, and then traverse.
+ // This should traverse a modified copy of the range and not the restored
+ // original one.
+ {
+ a[0] = 2;
+ auto r = Dune::transformedRangeView(std::move(a), [](auto&& x) { return 2*x;});
+ a = a_backup;
+ suite.check(checkRandomAccessNumberRangeSums(r, 14, 4, 6))
+ << "incorrect values in transformedRangeView of r-value";
+ suite.check(checkRangeIterators(r))
+ << "iterator test fails for transformedRangeView of r-value";
+ suite.check(checkRangeSize(r))
+ << "checking size fails for transformedRangeView of r-value";
+ }
+ // Check if returning real references in the transformation works
+ {
+ auto r = Dune::transformedRangeView(a, [](auto&& x) -> decltype(auto) { return x;});
+ suite.check(is_mutable_reference<decltype(*r.begin())>::value)
+ << "iterator with mutable-reference returning transformation does not return mutable references";
+ suite.check(&(*(r.begin())) == &(a[0]))
+ << "reference points to wrong location";
+ (*r.begin()) = 0;
+ suite.check(a[0] == 0)
+ << "modifying range by reference returning transformation failed";
+ a = a_backup;
+ }
+ // Check if returning real references in the transformation works
+ {
+ const auto& a_const = a;
+ auto r = Dune::transformedRangeView(a_const, [](auto&& x) -> decltype(auto) { return x;});
+ suite.check(is_const_reference<decltype(*r.begin())>::value)
+ << "iterator with const-reference returning transformation does not return const references";
+ suite.check(&(*(r.begin())) == &(a[0]))
+ << "reference points to wrong location";
+ }
+ // Check iterator based transformation
+ {
+ auto r = Dune::iteratorTransformedRangeView(a, [&](auto&& it) { return (*it)+(it-a.begin());});
+ suite.check(checkRandomAccessNumberRangeSums(r, 9, 1, 5))
+ << "incorrect values in transformedRangeView of l-value";
+ suite.check(checkRangeIterators(r))
+ << "iterator test fails for transformedRangeView of l-value";
+ suite.check(checkRangeSize(r))
+ << "checking size fails for transformedRangeView of l-value";
+ a = a_backup;
+ }
+ });
+ // Check transformedRangeView with on the fly range
+ {
+ auto r = Dune::transformedRangeView(Dune::range(10), [](auto&& x) { return 2*x;});
+ suite.check(checkRandomAccessNumberRangeSums(r, 90, 0, 18))
+ << "transformation of on-the-fly range gives incorrect results";
+ suite.check(checkRangeIterators(r))
+ << "iterator test fails for transformedRangeView";
+ suite.check(checkRangeSize(r))
+ << "checking size fails for transformedRangeView of on-the-fly range";
+ }
+ // Check if we can indirectly sort subrange via reference returning transformations
+ {
+ auto a = std::vector<int>{4,3,2,1,0};
+ auto r = Dune::transformedRangeView(Dune::range(1,4), [&](auto&& i) -> decltype(auto){ return a[i];});
+ std::sort(r.begin(), r.end());
+ suite.check(a == std::vector<int>{4,1,2,3,0})
+ << "sorting reference returning transformedRangeView failed";
+
+ auto r2 = Dune::transformedRangeView(std::array<int,3>{0, 2, 4}, [&](auto&& i) -> decltype(auto){ return a[i];});
+ std::sort(r2.begin(), r2.end());
+ suite.check(a == std::vector<int>{0,1,2,3,4})
+ << "sorting reference returning transformedRangeView failed";
+
+ // Remap values of certain keys in a std::map such that
+ // they are sorted according to the keys.
+ auto m = std::map<int, int>{{-1,5},{0,4}, {1,3}, {2,2}};
+ auto r3 = Dune::transformedRangeView(std::array<int,3>{1, -1, 2}, [&](auto&& i) -> decltype(auto){ return m[i];});
+ std::sort(r3.begin(), r3.end());
+ suite.check(m == std::map<int, int>{{1,2},{-1,3}, {2,5},{0,4}})
+ << "sorting reference returning transformedRangeView failed";
+ }
+ return suite;
+}
+
+
+auto testSparseRange()
+{
+ Dune::TestSuite suite("Check sparseRange()");
+
+ auto checkWithMatrix = [&suite](auto&& M) {
+ for(std::size_t i=0; i<M.size(); ++i)
+ {
+ auto it = M[i].begin();
+ auto end = M[i].end();
+ for(auto&& [M_ij, j] : Dune::sparseRange(M[i]))
+ {
+ suite.check(it!=end)
+ << "sparseRange() contains more entries than the original range";
+ suite.check(&M_ij == &M[i][j])
+ << "Entry obtained by sparseRange() does not point to actual range entry";
+ suite.check(&M_ij == &(*it))
+ << "Entry obtained by sparseRange() does not point to actual range entry";
+ ++it;
+ }
+ suite.check(it==end)
+ << "sparseRange() contains less entries than the original range";
+ }
+ };
+
+ auto M1 = Dune::DiagonalMatrix<double,1>({42});
+ checkWithMatrix(M1);
+ checkWithMatrix(std::as_const(M1));
+
+ auto M2 = Dune::DiagonalMatrix<double,2>({42, 41});
+ checkWithMatrix(M2);
+ checkWithMatrix(std::as_const(M2));
+
+ auto M3 = Dune::DiagonalMatrix<double,3>({42, 41, 40});
+ checkWithMatrix(M3);
+ checkWithMatrix(std::as_const(M3));
+
+ return suite;
+}
+
+
+
+int main()
+{
+ // Check IsIterable<> for https://gitlab.dune-project.org/core/dune-common/issues/58
+ static_assert(Dune::IsIterable< std::array<int, 3> >::value, "std::array<int> must be a range");
+ static_assert(Dune::IsIterable< Dune::IteratorRange<int*> >::value, "IteratorRange must be a range");
+ static_assert(!Dune::IsIterable< int >::value, "int must not be a range");
+
+ Dune::TestSuite suite;
+
+ // max_value, min_value
+ {
+ const int value = 12;
+ suite.check(Dune::max_value(value) == value);
+ suite.check(Dune::min_value(value) == value);
+
+ std::array<int, 3> values{-42, 0, 42};
+ suite.check(Dune::max_value(values) == 42)
+ << "maximum of values is 42, but got " << Dune::max_value(values);
+ suite.check(Dune::min_value(values) == -42)
+ << "minimum of values is -42, but got " << Dune::min_value(values);
+
+ std::array<int, 3> positiveValues{1, 2, 3};
+ suite.check(Dune::max_value(positiveValues) == 3)
+ << "maximum of positiveValues is 3, but got " << Dune::max_value(positiveValues);
+ suite.check(Dune::min_value(positiveValues) == 1)
+ << "minimum of positiveValues is 1, but got " << Dune::min_value(positiveValues);
+
+ std::array<int, 3> negativeValues{-1, -3, -1};
+ suite.check(Dune::max_value(negativeValues) == -1)
+ << "maximum of negativeValues is -1, but got " << Dune::max_value(negativeValues);
+ suite.check(Dune::min_value(negativeValues) == -3)
+ << "minimum of negativeValues is -3, but got " << Dune::min_value(negativeValues);
+ }
+
+ // any_true, all_true
+ {
+ const std::array<bool, 3> allTrue{true, true, true};
+ const std::array<bool, 3> allFalse{false, false, false};
+ const std::array<bool, 3> someTrue{false, true, false};
+
+ suite.check(Dune::any_true(allTrue))
+ << "any_true(allTrue) must be true";
+ suite.check(!Dune::any_true(allFalse))
+ << "any_true(allFalse) must be false";
+ suite.check(Dune::any_true(someTrue))
+ << "any_true(someTrue) must be true";
+
+ suite.check(Dune::all_true(allTrue))
+ << "all_true(allTrue) must be true";
+ suite.check(!Dune::all_true(allFalse))
+ << "all_true(allFalse) must be false";
+ suite.check(!Dune::all_true(someTrue))
+ << "all_true(someTrue) must be false";
+
+ const bool t = true;
+ const bool f = false;
+
+ suite.check(Dune::any_true(t))
+ << "any_true(true) must be true";
+ suite.check(!Dune::any_true(f))
+ << "any_true(false) must be false";
+
+ suite.check(Dune::all_true(t))
+ << "all_true(true) must be true";
+ suite.check(!Dune::all_true(f))
+ << "all_true(false) must be false";
+ }
+
+ // integer ranges
+ using Dune::range;
+ std::vector<int> numbers(range(6).begin(), range(6).end());
+ int sum = 0;
+ for( auto i : range(numbers.size()) )
+ sum += numbers[i];
+ suite.check(sum == 15) << "sum over range( 0, 6 ) must be 15.";
+ suite.check(range(sum, 100)[5] == 20) << "range(sum, 100)[5] must be 20.";
+ sum = 0;
+ for( auto i : range(-10, 11) )
+ sum += i;
+ suite.check(sum == 0) << "sum over range( -10, 11 ) must be 0.";
+
+ static_assert(std::is_same<decltype(range(std::integral_constant<int, 4>()))::integer_sequence, std::make_integer_sequence<int, 4>>::value,
+ "decltype(range(std::integral_constant<int, 4>))::integer_sequence must be the same as std::make_integer_sequence<int, 4>");
+
+ // Hybrid::forEach for integer ranges
+ Dune::Hybrid::forEach(range(std::integral_constant<int, 1>()), [] (auto &&i) {
+ static_assert(std::is_same<std::decay_t<decltype(i)>, std::integral_constant<int, 0>>::value,
+ "Hybrid::forEach(range(std::integral_constant<int, 1>()), ...) should only visit std::integral_constant<int, 0>.");
+ });
+
+
+ {
+ auto r = range(-10,11);
+ auto it = r.begin();
+ auto end = r.end();
+ auto op = [](const auto& x){};
+ suite.check(testConstIterator(it, end, op)==0)
+ << "iterator test fails for range(-10,11)";
+ }
+
+ suite.subTest(testTransformedRangeView());
+
+ suite.subTest(testSparseRange());
+
+ return suite.exit();
+
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <iostream>
+#include <unordered_map>
+
+#include <dune/common/test/testsuite.hh>
+#include <dune/common/classname.hh>
+#include <dune/common/reservedvector.hh>
+
+int main() {
+ Dune::TestSuite test;
+ // check construction from an initializer list
+ Dune::ReservedVector<unsigned int, 8> rv = {3,2,1};
+ test.check(rv.size() == 3);
+ test.check(rv.back() == 1);
+ test.check(rv.front() == 3);
+
+ // check assignment from an initializer list
+ rv = {1,2,3,4};
+ test.check(rv.size() == 4);
+ test.check(rv.back() == 4);
+ test.check(rv.front() == 1);
+
+ // check push_back
+ rv.push_back(5);
+ test.check(rv.size() == 5);
+ test.check(rv.back() == 5);
+
+ // check copy constructor
+ Dune::ReservedVector<unsigned int, 8> rv2 = rv;
+ test.check(rv2[0] == 1 &&
+ rv2[1] == 2 &&
+ rv2[2] == 3 &&
+ rv2[3] == 4 &&
+ rv2[4] == 5);
+
+ // check pop_back
+ rv2.pop_back();
+ test.check(rv2.size() == 4);
+ test.check(rv2.back() == 4);
+
+ // make sure we can hash a reserved vector
+ std::hash< Dune::ReservedVector<unsigned int, 8> > rv_hash;
+ auto hash_value = rv_hash(rv);
+ auto hash_value2 = rv_hash(rv2);
+ test.check( hash_value != hash_value2 );
+
+ // try using an unordered map
+ std::unordered_map< Dune::ReservedVector<unsigned int, 8>, double > rv_map;
+ rv_map[rv] = 1.0;
+ rv_map[rv2] = 2.0;
+
+ // try and try again with a const ReservedVector
+ std::unordered_map< const Dune::ReservedVector<unsigned int, 8>, double> const_rv_map;
+
+ return test.exit();
+}
--- /dev/null
+#include <config.h>
+
+#include <cstdio>
+#include <cstdlib>
+#include <fstream>
+#include <iostream>
+#include <string>
+
+#include <scotch.h>
+
+#include <dune/common/exceptions.hh>
+
+// write graph to file
+void prepare (std::string filename)
+{
+ std::ofstream out(filename, std::ios_base::out);
+
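+ // Scotch source graph format: version line, "<#vertices> <#arcs>", base
+ // index and flags, then one line per vertex: its degree followed by the
+ // indices of its neighbours.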
+ // out << "0\n4 4\n0 000\n1 1\n1 0\n1 3\n1 2";
+
+ out << "0\n16 48\n0 000\n2 1 4\n3 0 2 5\n3 1 3 6\n2 2 7\n3 0 5 8\n4 1 4 6 9\n4 2 5 7 10\n3 3 6 11\n3 4 9 12\n4 5 8 10 13\n4 6 9 11 14\n3 7 10 15\n2 8 13\n3 9 12 14\n3 10 13 15\n2 11 14";
+}
+
+int main (int argc, char** argv)
+{
+ SCOTCH_errorProg (argv[0]);
+
+ // Initialize source graph
+ SCOTCH_Graph grafdat;
+ if (SCOTCH_graphInit (&grafdat) != 0) {
+ DUNE_THROW(Dune::Exception, "cannot initialize graph");
+ }
+
+ prepare("graph_file.grf");
+
+ FILE* fileptr = nullptr;
+ if ((fileptr = fopen ("graph_file.grf", "r")) == nullptr) {
+ DUNE_THROW(Dune::Exception, "cannot open file");
+ }
+
+ // Read source graph
+ if (SCOTCH_graphLoad (&grafdat, fileptr, -1, 0) != 0) {
+ DUNE_THROW(Dune::Exception, "cannot load graph");
+ }
+
+ fclose (fileptr);
+
+ if (SCOTCH_graphCheck (&grafdat) != 0) {
+ DUNE_THROW(Dune::Exception, "graph check failed");
+ }
+
+ SCOTCH_Num vertnbr = 0, edgenbr = 0;
+ SCOTCH_graphSize (&grafdat, &vertnbr, &edgenbr);
+
+ std::cout << "Number of vertices: " << vertnbr << std::endl;
+ std::cout << "Number of edges: " << edgenbr << std::endl;
+
+ SCOTCH_graphExit (&grafdat);
+
+ return EXIT_SUCCESS;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+// make sure assert works even when not compiling for debugging
+#ifdef NDEBUG
+#undef NDEBUG
+#endif
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <dune/common/deprecated.hh>
+#include <dune/common/shared_ptr.hh>
+
+#include <cstdlib>
+
+
+class A {};
+class B : public A {};
+class C : A {};
+
+
+int main(){
+ using namespace Dune;
+ int ret=0;
+ {
+ // test shared_ptr for stack allocation
+ {
+ int i = 10;
+ std::shared_ptr<int> pi = stackobject_to_shared_ptr(i);
+ }
+
+ // test shared_ptr for stack allocation with down cast
+ {
+DUNE_NO_DEPRECATED_BEGIN
+ B b2;
+ std::shared_ptr<A> pa = stackobject_to_shared_ptr<A>(b2);
+DUNE_NO_DEPRECATED_END
+#ifdef SHARED_PTR_COMPILE_FAIL
+ C c;
+ pa = stackobject_to_shared_ptr<A>(c); // A is an inaccessible base of C
+#endif
+ }
+ }
+ return (ret);
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <dune/common/singleton.hh>
+#include <iostream>
+class Foo : public Dune::Singleton<Foo>
+{
+public:
+ Foo()
+ {
+ bytes = new char[1000];
+ }
+
+ ~Foo()
+ {
+ delete[] bytes;
+ }
+private:
+ char* bytes;
+};
+
+class Foo1
+{
+public:
+ Foo1()
+ {
+ bytes = new char[1000];
+ }
+
+ ~Foo1()
+ {
+ delete[] bytes;
+ }
+private:
+ char* bytes;
+};
+
+typedef Dune::Singleton<Foo1> FooSingleton;
+
+
+Foo* globalFoo = nullptr;
+Foo1* globalFoo1 = nullptr;
+
+void setFoo()
+{
+ globalFoo = &Foo::instance();
+}
+
+
+void setFoo1()
+{
+ globalFoo1 = &FooSingleton::instance();
+}
+
+int testFoo()
+{
+ if(globalFoo != &Foo::instance()) {
+ std::cerr<<" Foo is not a real singleton!"<<std::endl;
+ return 1;
+ }
+ return 0;
+}
+
+
+int testFoo1()
+{
+ if(globalFoo1 != &FooSingleton::instance()) {
+ std::cerr<<" Foo is not a real singleton!"<<std::endl;
+ return 1;
+ }
+ return 0;
+}
+
+int main()
+{
+ int ret=0;
+ {
+ Foo& foo = Foo::instance();
+ Foo& foo1 = Foo::instance();
+ if(&foo!=&foo1) {
+ std::cerr<<" Foo is not a real singleton!"<<std::endl;
+ ++ret;
+ }
+ }
+ setFoo();
+ ret += testFoo();
+ {
+ Foo1& foo = FooSingleton::instance();
+ Foo1& foo1 = FooSingleton::instance();
+ if(&foo!=&foo1) {
+ std::cerr<<" Foo is not a real singleton!"<<std::endl;
+ ++ret;
+ }
+ }
+ setFoo1();
+ return ret += testFoo1();
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <dune/common/sllist.hh>
+#include <dune/common/test/iteratortest.hh>
+#include <dune/common/poolallocator.hh>
+#include <iostream>
+
+class DoubleWrapper
+{
+public:
+ DoubleWrapper(double b)
+ : d(b)
+ {
+ std::cout<<"Constructed "<<this<<std::endl;
+ }
+
+ DoubleWrapper()
+ : d()
+ {
+ std::cout<<"Constructed "<<this<<std::endl;
+ }
+
+ DoubleWrapper(const DoubleWrapper& other)
+ : d(other.d)
+ {
+ std::cout<<"Copied "<<this<<" from "<<&other<<std::endl;
+ }
+
+ ~DoubleWrapper()
+ {
+ std::cout<<"Destructing "<<this<<std::endl;
+ }
+
+ operator double() const
+ {
+ return d;
+ }
+
+ bool operator==(const DoubleWrapper& other) const
+ {
+ return d == other.d;
+ }
+
+
+ bool operator!=(const DoubleWrapper& other) const
+ {
+ return d != other.d;
+ }
+
+private:
+ double d;
+};
+
+typedef Dune::PoolAllocator<int,8*1024-16> IntAllocator;
+//typedef std::allocator<int> IntAllocator;
+typedef Dune::PoolAllocator<double,8*1024-16> DoubleAllocator;
+//typedef std::allocator<double> DoubleAllocator;
+typedef Dune::PoolAllocator<DoubleWrapper,8*1024-16> DoubleWAllocator;
+//typedef std::allocator<DoubleWrapper> DoubleWAllocator;
+
+template<typename T, typename A>
+const T& tail(const Dune::SLList<T,A>& alist)
+{
+ typedef typename Dune::SLList<T,A>::const_iterator Iterator;
+ Iterator tail=alist.begin();
+
+ for(int i = alist.size() - 1; i > 0; --i)
+ ++tail;
+ return *tail;
+}
+
+template<typename T,class A>
+int check(const Dune::SLList<T,A>& alist, const T* vals)
+{
+ typedef typename Dune::SLList<T,A>::const_iterator iterator;
+ int i=0;
+ for(iterator iter = alist.begin(); iter != alist.end(); ++iter, i++) {
+ if( vals[i] != *iter ) {
+ std::cerr<<" List missmatch! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+template<typename T,class A>
+void randomizeListBack(Dune::SLList<T,A>& alist){
+ using namespace Dune;
+
+ srand(300);
+
+ int lowest=0, highest=1000, range=(highest-lowest)+1;
+
+ T vals[10];
+
+ for(int i=0; i < 10; i++) {
+ T d = T(range*(rand()/(RAND_MAX+1.0)));
+ alist.push_back(d);
+ vals[i]=d;
+ }
+
+ check(alist, vals);
+}
+
+template<typename T,class A>
+void randomizeListFront(Dune::SLList<T,A>& alist){
+ using namespace Dune;
+
+ srand(300);
+ T vals[10];
+
+ int lowest=0, highest=1000, range=(highest-lowest)+1;
+
+ for(int i=0; i < 10; i++) {
+ T d = T(range*(rand()/(RAND_MAX+1.0)));
+ alist.push_front(d);
+ vals[9-i]=d;
+ }
+
+ check(alist, vals);
+}
+int testAssign()
+{
+ typedef Dune::SLList<int,IntAllocator> List;
+ List alist, blist;
+
+ alist.push_back(3);
+ alist.push_back(4);
+ alist.push_back(5);
+
+ blist.push_back(-1);
+
+ blist=alist;
+ List::iterator biter=blist.begin(), aiter=alist.begin();
+ for(; aiter!=alist.end(); ++aiter, ++biter)
+ if(*aiter!=*biter) {
+ std::cerr<<"Asignment failed "<<__FILE__<<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+ return 0;
+}
+
+int testDelete()
+{
+ typedef Dune::SLList<int,IntAllocator> List;
+ List alist;
+
+ alist.push_back(3);
+ alist.push_back(4);
+ alist.push_back(5);
+
+ List::ModifyIterator iter = alist.beginModify();
+ iter.remove();
+ if(*(alist.begin())!=4) {
+ std::cerr<<"delete next on position before head failed! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+ if(*iter!=4) {
+ std::cerr<<"delete next failed! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+ ++iter;
+ iter.remove();
+ if(iter!=alist.end()) {
+ std::cerr<<"delete next failed! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+ if(tail(alist)!=4) {
+ std::cerr<<"delete before tail did not change tail! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ return 1;
+ }
+
+ return 0;
+}
+
+int testEmpty()
+{
+ typedef Dune::SLList<int,DoubleAllocator> List;
+ int ret = 0;
+
+ List alist;
+ if(!alist.empty()) {
+ std::cerr<<"Newly created list not empty! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ret++;
+ }
+
+ if(0 != alist.size()) {
+ std::cerr<<"Newly created list not empty! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ret++;
+ }
+
+ randomizeListBack(alist);
+
+
+ if(alist.empty()) {
+ std::cerr<<"Randomized list is empty! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ret++;
+ }
+
+ if(0 == alist.size()) {
+ std::cerr<<"Randomized list is empty! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ret++;
+ }
+
+ for(int elements=alist.size(); elements>0; --elements)
+ alist.pop_front();
+
+ if(!alist.empty()) {
+ std::cerr<<"Emptied list not empty! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ret++;
+ }
+
+ if(0 != alist.size()) {
+ std::cerr<<"Emptied list not empty! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ret++;
+ }
+
+
+ if(ret!=0)
+ // Skip next tests
+ return ret;
+
+ randomizeListFront(alist);
+
+ if(alist.empty()) {
+ std::cerr<<"Randomized list is empty! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ret++;
+ }
+
+ if(0 == alist.size()) {
+ std::cerr<<"Randomized list is empty! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ret++;
+ }
+
+ alist.clear();
+
+ if(!alist.empty()) {
+ std::cerr<<"Emptied list not empty! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ret++;
+ }
+
+ if(0 != alist.size()) {
+ std::cerr<<"Emptied list not empty! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ret++;
+ }
+ return ret;
+
+}
+
+int testInsert()
+{
+ typedef Dune::SLList<int,IntAllocator> List;
+ //typedef Dune::SLList<int> List;
+
+ List alist;
+
+ alist.push_back(3);
+ List::ModifyIterator iter=alist.beginModify();
+ iter.insert(7);
+ int ret=0;
+
+ if(*iter!=3) {
+ std::cerr<<"Value at current position changed due to insert! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ret++;
+ }
+
+ if(*alist.begin()!=7) {
+ std::cerr<<"Insert did not change first element! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ret++;
+ }
+
+ iter=alist.beginModify();
+ iter.insert(5);
+
+ if(iter==alist.end() || *iter!=7) {
+ std::cerr<<"Insertion failed.! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ++ret;
+ }
+
+ if(*(alist.begin())!=5) {
+ std::cerr<<"Insert after at onebeforeBegin did not change head! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ++ret;
+ }
+ iter = alist.endModify();
+
+ if(iter!=alist.end()) {
+ std::cerr <<" Iterator got by endModify does not equal that got by end()! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ++ret;
+ }
+
+
+ iter.insert(20);
+
+ if(iter != alist.end()) {
+ std::cerr<<"Insertion changed end iterator! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ++ret;
+ }
+
+ if(tail(alist)!=20) {
+ std::cerr<<"tail was not changed!! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ++ret;
+ }
+
+ alist.clear();
+ iter=alist.beginModify();
+ iter.insert(5);
+ if(iter!=alist.end()) {
+ std::cerr<<"Insertion failed! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ++ret;
+ }
+ return ret;
+}
+
+int testPushPop(){
+ using namespace Dune;
+ int ret=0;
+
+ Dune::SLList<int,IntAllocator> alist;
+ //std::cout<<"PushPop 1:"<<alist<<std::endl;
+
+ if(alist.begin() != alist.end()) {
+ std::cerr<<"For empty list begin and end iterators do not match! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ret++;
+ }
+
+ alist.push_back(1);
+
+ //std::cout<<"Push back 1: "<<alist<<std::endl;
+
+ if(*(alist.begin())!=1) {
+ std::cerr<<"Entry should be 1! Push back failed! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ ret++;
+ }
+
+ alist.push_back(2);
+
+ //std::cout<<"Push back 2: "<<alist<<std::endl;
+
+ if(*(alist.begin())!=1) {
+ ret++;
+ std::cerr<<"Entry should be 2! Push back failed! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ }
+
+ alist.push_front(3);
+ //std::cout<<"Push front 3: "<<alist<<std::endl;
+
+ if(*(alist.begin())!=3) {
+ ret++;
+ std::cerr<<"Entry should be 3! Push front failed! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ }
+
+ alist.pop_front();
+ //std::cout<<*(alist.begin())<<" Pop front: "<<alist<<std::endl;
+
+
+ if(*(alist.begin())!=1) {
+ ret++;
+ std::cerr<<"Entry should be 1, but is "<<*(alist.begin())<<"! Push back failed! "<<__FILE__<<":"<<__LINE__<<std::endl;
+ }
+ return ret;
+}
+
+int main()
+{
+ int ret=0;
+
+ //Dune::SLList<double> list;
+ Dune::SLList<double,DoubleAllocator> list, list1;
+ Dune::SLList<DoubleWrapper, DoubleWAllocator> list2;
+
+ randomizeListBack(list1);
+ randomizeListFront(list);
+
+ Dune::SLList<double,DoubleAllocator> copied(list);
+ if(copied.size()!=list.size()) {
+ std::cerr << "Size of copied list does not match!"<<std::endl;
+ ++ret;
+ }
+ else{
+ typedef Dune::SLList<double,DoubleAllocator>::const_iterator Iterator;
+ Iterator iend = list.end();
+ for(Iterator iter1=list.begin(), iter2=copied.begin(); iter1 != iend; ++iter1, ++iter2)
+ if(*iter1!=*iter2) {
+ std::cerr << "Entries of copied are not the same!"<<std::endl;
+ ++ret;
+ }
+ }
+
+ randomizeListFront(list2);
+
+ Printer<std::iterator_traits<Dune::SLList<double,DoubleAllocator>::ModifyIterator>::value_type> print;
+
+ Dune::SLList<double,DoubleAllocator>::ModifyIterator lbegin = list.beginModify(), lend = list.endModify();
+
+ double& d = lbegin.dereference();
+
+ d=2.0;
+
+ double& d1 = lbegin.dereference();
+
+ d1=3.0;
+
+ lbegin.dereference()=5.0;
+
+ lbegin.operator*()=5.0;
+
+ *lbegin=5.0;
+
+ std::cout << "Testing ConstIterator "<<std::endl;
+ ret+=testConstIterator(lbegin, lend, print);
+ std::cout << "Testing Iterator "<<std::endl;
+ ret+=testIterator(list);
+ std::cout << "Testing Iterator "<<std::endl;
+ ret+=testIterator(list1);
+
+ std::cout<< " Test PushPop "<<std::endl;
+ ret+=testPushPop();
+ std::cout<<" Test OneBeforeBegin"<<std::endl;
+
+ //ret+=testOneBeforeBegin(list1);
+
+ std::cout<< "test empty"<<std::endl;
+ ret+=testEmpty();
+ std::cout<< "test insert"<<std::endl;
+
+ ret+=testInsert();
+ std::cout<< "test delete"<<std::endl;
+ ret+=testDelete();
+
+ ret+=testAssign();
+ list.clear();
+ list1.clear();
+ list2.clear();
+ std::cout<<" randomize back"<<std::endl;
+ randomizeListBack(list);
+ std::cout<<" randomize front"<<std::endl;
+ randomizeListFront(list1);
+ return ret;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <tuple>
+#include <utility>
+#include <sstream>
+
+#include <dune/common/std/apply.hh>
+
+#include <dune/common/test/testsuite.hh>
+
+
+template<class T>
+void nop(std::initializer_list<T>&&)
+{}
+
+
+int main()
+{
+ auto test_args = std::make_tuple(true, 2, 3, "abc");
+
+ Dune::TestSuite test;
+
+ auto concat = [](auto&&... args) {
+ bool first = true;
+ std::stringstream stream;
+ nop({(stream << (first ? "":",") << args, first = false)...});
+ return stream.str();
+ };
+
+ test.check(Dune::Std::apply(concat, test_args) == "1,2,3,abc") << "Dune::Std::apply failed with concat lambda";
+
+ auto makeTuple = [](auto&&... args) {
+ return std::make_tuple(args...);
+ };
+
+ test.check(Dune::Std::apply(makeTuple, test_args) == test_args) << "Dune::Std::apply failed with makeTuple lambda";
+
+ auto intTuple = std::make_tuple(1,2,3);
+ auto&& intTuple0 = Dune::Std::apply([](auto&& arg0, auto&&... /*args*/) -> decltype(auto) { return arg0; }, intTuple);
+ intTuple0 = 42;
+
+ test.check(std::get<0>(intTuple) == intTuple0) << "Dune::Std::apply does not properly return references";
+
+ // transformTuple implemented using Std::apply
+ auto transformTuple = [](auto&& t, auto&& f) {
+ return Dune::Std::apply([&](auto&&... args) {
+ return std::make_tuple((f(std::forward<decltype(args)>(args)))...);
+ }, t);
+ };
+
+ auto t1 = std::make_tuple(1, 0.2);
+ auto t2 = transformTuple(t1, [](auto&& x) { return 1.0/x; });
+
+ test.check(t2 == std::make_tuple(1.0, 5.0)) << "transformTuple implementation based on Dune::Std::apply fails";
+
+ return test.exit();
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+#include <dune/common/std/functional.hh>
+#include <iostream>
+#include <cassert>
+
+struct Foo {
+ static int count;
+ Foo() { ++count; std::cout << "construct" << std::endl; }
+ Foo(const Foo&) { ++count; std::cout << "copy construct" << std::endl; }
+ Foo(Foo&&) { ++count; std::cout << "move construct" << std::endl; }
+ ~Foo() { --count; std::cout << "destruct" << std::endl; }
+};
+int Foo::count = 0;
+
+template<typename T>
+T&& assert_count(T&& arg, int count)
+{
+ std::cout << std::decay_t<T>::count << std::endl;
+ if (std::decay_t<T>::count != count)
+ std::cerr << "Passed count does not match state of the argument" << std::endl;
+ return std::forward<T>(arg);
+}
+
+int main()
+{
+ auto id = Dune::Std::identity();
+
+ assert_count(id(Foo()),1); // pass an r-value to identity; the temporary is still alive during the assert
+
+ const auto& foo0 = id(Foo()); // pass an r-value to identity
+ assert_count(foo0,0); // the temporary returned by id(Foo()) is already destructed at this point
+
+ auto foo1 = id(Foo()); // pass an r-value to identity and move it to foo1
+ assert_count(foo1,1); // the temporary foo0 referred to is already destructed; only foo1 remains
+
+ Foo foo2;
+ assert_count(id(foo2),2); // pass an l-value to identity
+
+ const auto& foo3 = id(foo2); // pass an l-value to identity
+ assert_count(foo3,2); // foo2 still exists at this point
+
+ auto foo4 = id(foo2); // pass an l-value to identity and copy its result
+ assert_count(foo4,3); // the copy of foo2 still exists at this point
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <tuple>
+
+#include <dune/common/streamoperators.hh>
+
+using Dune::operator>>;
+using Dune::operator<<;
+
+int main()
+{
+ typedef std::tuple<int, int, int> Tuple;
+
+ {
+ const Tuple t{1, 2, 3};
+ const std::string expected = "[1,2,3]";
+
+ std::ostringstream out;
+ out << t;
+
+ if( out.str() != expected )
+ return 1;
+ }
+
+ {
+ const std::string data = "1 2 3";
+ const Tuple expected{1, 2, 3};
+
+ std::istringstream in(data);
+ Tuple t;
+ in >> t;
+
+ if( t != expected )
+ return 1;
+ }
+
+ return 0;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+/*
+
+ Test to check if the standard streams in libdune can be properly
+ linked with this program and if they work
+
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+#include <fstream>
+
+#include <dune/common/stdstreams.hh>
+
+// enums are a nice special case (was a bug)
+enum check { VALUE = 5 };
+
+int main () {
+ try {
+ // let output happen but vanish
+ std::ofstream dummy("/dev/null");
+ Dune::derr.attach(dummy);
+
+ Dune::derr.push(true);
+ Dune::derr << "Teststring" << std::endl;
+
+ Dune::derr << VALUE << std::endl;
+ Dune::dverb << VALUE << std::endl;
+ Dune::dvverb << VALUE << std::endl;
+ Dune::dinfo << VALUE << std::endl;
+ Dune::dwarn << VALUE << std::endl;
+ Dune::dgrave << VALUE << std::endl;
+
+ // instantiate private stream and connect global stream
+ {
+ Dune::DebugStream<> mystream(dummy);
+ Dune::derr.tie(mystream);
+ Dune::derr << "Blah" << std::endl;
+ // untie before mystream gets destructed
+ Dune::derr.untie();
+ }
+
+ Dune::derr << "Still working" << std::endl;
+ } catch (Dune::Exception &e) {
+ std::cerr << e << std::endl;
+ return 2;
+ } catch (...) {
+ return 1;
+ };
+
+ return 0;
+}
--- /dev/null
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <string>
+
+#include <dune/common/stringutility.hh>
+
+namespace {
+const std::string hello_world("hello world");
+} /* namespace */
+
+bool test_hasPrefix()
+{
+ bool pass = true;
+
+ using Dune::hasPrefix;
+ pass &= hasPrefix(hello_world, "hello");
+ pass &= !hasPrefix(hello_world, "world");
+
+ return pass;
+}
+
+bool test_hasSuffix()
+{
+ bool pass = true;
+
+ using Dune::hasSuffix;
+ pass &= hasSuffix(hello_world, "world");
+ pass &= !hasSuffix(hello_world, "hello");
+
+ return pass;
+}
+
+bool test_formatString()
+{
+ bool pass = true;
+ const int one = 1;
+ const static std::string format("hello %i");
+ const static std::string expected("hello 1");
+
+ using Dune::formatString;
+ const std::string s = formatString(format, one);
+ pass &= (s == expected);
+
+ return pass;
+}
+
+int main()
+{
+ bool pass = true;
+
+ pass &= test_hasPrefix();
+ pass &= test_hasSuffix();
+ pass &= test_formatString();
+
+ return pass ? 0 : 1;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+// #define DEBUG_ALLOCATOR_KEEP 1
+#define DEBUG_NEW_DELETE 3
+
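+// Note: the FAILURE1..FAILURE5 and EXPECTED_SIGNAL/EXPECTED_ALT_SIGNAL macros
+// used below are presumably defined by the build system for the individual
+// test variants; each FAILUREn enables one deliberate memory error, and
+// EXPECTED_SIGNAL names the signal the test then expects to receive.
+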
+#include <dune/common/debugallocator.hh>
+#if HAVE_MPROTECT
+
+#include <iostream>
+#include <csignal>
+#include <cstdlib>
+#include <vector>
+
+class A
+{
+public:
+ A() { std::cout << "INIT A\n"; }
+ int x;
+ void foo() {};
+};
+
+void basic_tests ()
+{
+ using Dune::DebugMemory::alloc_man;
+
+ size_t s = 256;
+ double * x = alloc_man.allocate<double>(s);
+ x[s-1] = 10;
+
+ // access out of bounds
+#ifdef FAILURE1
+ x[s+1] = 1;
+#endif
+
+ // lost allocation, free and double-free
+#ifndef FAILURE2
+ alloc_man.deallocate<double>(x);
+#endif
+#ifdef FAILURE3
+ alloc_man.deallocate<double>(x);
+#endif
+
+ // access after free
+#ifdef FAILURE4
+ x[s-1] = 10;
+#endif
+}
+
+void allocator_tests()
+{
+ std::vector<double, Dune::DebugAllocator<double> > v;
+ v.push_back(10);
+ v.push_back(12);
+ v.size();
+ std::cout << v[0] << "\n";
+ std::cout << v[1] << "\n";
+#ifdef FAILURE5
+ std::cout << v[v.capacity()] << "\n";
+#endif
+}
+
+void new_delete_tests()
+{
+ std::cout << "alloc double[3]\n";
+ double * y = new double[3];
+ delete[] y;
+
+ std::cout << "alloc A[2]\n";
+ A * z = new A[2];
+ z->foo();
+ delete[] z;
+ z = 0;
+
+ std::cout << "alloc (buf) A[3]\n";
+ char * buf = (char*)malloc(128);
+ A * z2 = new (buf) A[3];
+ z2->foo();
+ free(buf);
+ z2 = 0;
+
+ std::cout << "alloc A[4]\n";
+ A * z4 = ::new A[4];
+ z4->foo();
+ ::delete[] z4;
+ z4 = 0;
+}
+
+#endif // HAVE_MPROTECT
+
+int main(int, char**)
+{
+#if EXPECTED_SIGNAL
+ std::signal(EXPECTED_SIGNAL, std::_Exit);
+#endif
+#if EXPECTED_ALT_SIGNAL
+ std::signal(EXPECTED_ALT_SIGNAL, std::_Exit);
+#endif
+
+#if HAVE_MPROTECT
+ basic_tests();
+ allocator_tests();
+ new_delete_tests();
+#endif
+
+#ifdef EXPECTED_SIGNAL
+ return 1;
+#else
+ return 0;
+#endif
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+// Test the new (Dune) interface of float_cmp
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <iostream>
+
+#include <dune/common/float_cmp.hh>
+
+using std::cout;
+using std::endl;
+using std::flush;
+
+/////////////////////////
+//
+// compile time checks
+//
+
+// check that we can access the functions as FloatCmp::function from within the Dune namespace
+namespace Dune {
+ void checkNamespaceAccess() {
+ FloatCmp::eq(0.0, 0.0);
+ }
+} // namespace Dune
+ // check that we can access the functions as FloatCmp::function with using namespace Dune
+void checkUsingAccess() {
+ using namespace Dune;
+ FloatCmp::eq(0.0, 0.0);
+}
+
+// run time checks
+const char* repr(bool b) {
+ if(b) return "true ";
+ else return "false";
+}
+
+int passed = 0;
+int failed = 0;
+
+void count(bool pass) {
+ if(pass) { cout << "passed"; ++passed; }
+ else { cout << "failed"; ++failed; }
+}
+
+template<typename F>
+void tests(F f1, F f2, typename Dune::FloatCmp::EpsilonType<F>::Type eps, bool inside)
+{
+ bool result;
+
+ cout << "eq(" << f1 << ", " << f2 << ", " << eps << ") = " << flush;
+ result = Dune::FloatCmp::eq(f1, f2, eps);
+ cout << repr(result) << "\t";
+ count(result == inside);
+ cout << endl;
+
+ cout << "ge(" << f1 << ", " << f2 << ", " << eps << ") = " << flush;
+ result = Dune::FloatCmp::ge(f1, f2, eps);
+ cout << repr(result) << "\t";
+ count(result == (inside || f1 > f2));
+ cout << endl;
+
+ cout << "le(" << f1 << ", " << f2 << ", " << eps << ") = " << flush;
+ result = Dune::FloatCmp::le(f1, f2, eps);
+ cout << repr(result) << "\t";
+ count(result == (inside || f1 < f2));
+ cout << endl;
+
+ cout << "ne(" << f1 << ", " << f2 << ", " << eps << ") = " << flush;
+ result = Dune::FloatCmp::ne(f1, f2, eps);
+ cout << repr(result) << "\t";
+ count(result == !inside);
+ cout << endl;
+
+ cout << "gt(" << f1 << ", " << f2 << ", " << eps << ") = " << flush;
+ result = Dune::FloatCmp::gt(f1, f2, eps);
+ cout << repr(result) << "\t";
+ count(result == (!inside && f1 > f2));
+ cout << endl;
+
+ cout << "lt(" << f1 << ", " << f2 << ", " << eps << ") = " << flush;
+ result = Dune::FloatCmp::lt(f1, f2, eps);
+ cout << repr(result) << "\t";
+ count(result == (!inside && f1 < f2));
+ cout << endl;
+}
+
+template<typename F>
+void vectortests(F f1, F f2, typename Dune::FloatCmp::EpsilonType<F>::Type eps, bool inside)
+{
+ bool result;
+
+ cout << "eq({" << f1[0] << ", " << f1[1] << "}, {"
+ << f2[0] << ", " << f2[1] << "}, " << eps << ") = " << flush;
+ result = Dune::FloatCmp::eq(f1, f2, eps);
+ cout << repr(result) << "\t";
+ count(result == inside);
+ cout << endl;
+
+ cout << "ne({" << f1[0] << ", " << f1[1] << "}, {"
+ << f2[0] << ", " << f2[1] << "}, " << eps << ") = " << flush;
+ result = Dune::FloatCmp::ne(f1, f2, eps);
+ cout << repr(result) << "\t";
+ count(result == !inside);
+ cout << endl;
+}
+
+template<typename F>
+void tests(F f1, F f2, const typename Dune::FloatCmpOps<F> &ops, bool inside)
+{
+ bool result;
+ cout << "ops = operations(" << ops.epsilon() << ")" << endl;
+
+ cout << "ops.eq(" << f1 << ", " << f2 << ") = " << flush;
+ result = ops.eq(f1, f2);
+ cout << repr(result) << "\t";
+ count(result == inside);
+ cout << endl;
+
+ cout << "ops.ge(" << f1 << ", " << f2 << ") = " << flush;
+ result = ops.ge(f1, f2);
+ cout << repr(result) << "\t";
+ count(result == (inside || f1 > f2));
+ cout << endl;
+
+ cout << "ops.le(" << f1 << ", " << f2 << ") = " << flush;
+ result = ops.le(f1, f2);
+ cout << repr(result) << "\t";
+ count(result == (inside || f1 < f2));
+ cout << endl;
+
+ cout << "ops.ne(" << f1 << ", " << f2 << ") = " << flush;
+ result = ops.ne(f1, f2);
+ cout << repr(result) << "\t";
+ count(result == !inside);
+ cout << endl;
+
+ cout << "ops.gt(" << f1 << ", " << f2 << ") = " << flush;
+ result = ops.gt(f1, f2);
+ cout << repr(result) << "\t";
+ count(result == (!inside && f1 > f2));
+ cout << endl;
+
+ cout << "ops.lt(" << f1 << ", " << f2 << ") = " << flush;
+ result = ops.lt(f1, f2);
+ cout << repr(result) << "\t";
+ count(result == (!inside && f1 < f2));
+ cout << endl;
+}
+
+template<typename F>
+void vectortests(F f1, F f2, typename Dune::FloatCmpOps<F> &ops, bool inside)
+{
+ bool result;
+ cout << "ops = operations(" << ops.epsilon() << ")" << endl;
+
+ cout << "ops.eq({" << f1[0] << ", " << f1[1] << "}, {"
+ << f2[0] << ", " << f2[1] << "}) = " << flush;
+ result = ops.eq(f1, f2);
+ cout << repr(result) << "\t";
+ count(result == inside);
+ cout << endl;
+
+ cout << "ops.ne({" << f1[0] << ", " << f1[1] << "}, {"
+ << f2[0] << ", " << f2[1] << "}) = " << flush;
+ result = ops.ne(f1, f2);
+ cout << repr(result) << "\t";
+ count(result == !inside);
+ cout << endl;
+}
+
+int main() {
+ cout.setf(std::ios_base::scientific, std::ios_base::floatfield);
+ cout.precision(16);
+ Dune::FloatCmpOps<double> ops(1e-7);
+ Dune::FloatCmpOps<std::vector<double>> std_vec_ops(1e-7);
+ Dune::FloatCmpOps<Dune::FieldVector<double,2>> fvec_ops(1e-7);
+
+ cout << "Tests inside the epsilon environment" << endl;
+ tests<double>(1.0, 1.00000001, 1e-7, true);
+ tests<double>(1.0, 1.00000001, ops, true);
+ vectortests(std::vector<double>({1.0, 1.0}), std::vector<double>({1.00000001, 1.0}), 1e-7, true);
+ vectortests(std::vector<double>({1.0, 1.0}), std::vector<double>({1.00000001, 1.0}), std_vec_ops, true);
+ vectortests(Dune::FieldVector<double,2>({1.0, 1.0}), Dune::FieldVector<double,2>({1.00000001, 1.0}), 1e-7, true);
+ vectortests(Dune::FieldVector<double,2>({1.0, 1.0}), Dune::FieldVector<double,2>({1.00000001, 1.0}), fvec_ops, true);
+
+ cout << "Tests outside the epsilon environment, f1 < f2" << endl;
+ tests<double>(1.0, 1.000001, 1e-7, false);
+ tests<double>(1.0, 1.000001, ops, false);
+ vectortests(std::vector<double>({1.0, 1.0}), std::vector<double>({1.000001, 1.0}), 1e-7, false);
+ vectortests(std::vector<double>({1.0, 1.0}), std::vector<double>({1.000001, 1.0}), std_vec_ops, false);
+ vectortests(Dune::FieldVector<double,2>({1.0, 1.0}), Dune::FieldVector<double,2>({1.000001, 1.0}), 1e-7, false);
+ vectortests(Dune::FieldVector<double,2>({1.0, 1.0}), Dune::FieldVector<double,2>({1.000001, 1.0}), fvec_ops, false);
+
+ cout << "Tests outside the epsilon environment, f1 > f2" << endl;
+ tests<double>(1.000001, 1.0, 1e-7, false);
+ tests<double>(1.000001, 1.0, ops, false);
+ vectortests(std::vector<double>({1.000001, 1.0}), std::vector<double>({1.0, 1.0}), 1e-7, false);
+ vectortests(std::vector<double>({1.000001, 1.0}), std::vector<double>({1.0, 1.0}), std_vec_ops, false);
+ vectortests(Dune::FieldVector<double,2>({1.000001, 1.0}), Dune::FieldVector<double,2>({1.0, 1.0}), 1e-7, false);
+ vectortests(Dune::FieldVector<double,2>({1.000001, 1.0}), Dune::FieldVector<double,2>({1.0, 1.0}), fvec_ops, false);
+
+ cout << "Tests with f1 = f2 = 0" << endl;
+ tests<double>(0, 0, 1e-7, true);
+ tests<double>(0, 0, ops, true);
+ vectortests(std::vector<double>({0, 0}), std::vector<double>({0, 0}), 1e-7, true);
+ vectortests(std::vector<double>({0, 0}), std::vector<double>({0, 0}), std_vec_ops, true);
+ vectortests(Dune::FieldVector<double,2>({0, 0}), Dune::FieldVector<double,2>({0, 0}), 1e-7, true);
+ vectortests(Dune::FieldVector<double,2>({0, 0}), Dune::FieldVector<double,2>({0, 0}), fvec_ops, true);
+
+ int total = passed + failed;
+ cout << passed << "/" << total << " tests passed; " << failed << "/" << total << " tests failed" << endl;
+ if(failed > 0) return 1;
+ else return 0;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_TEST_TESTSUITE_HH
+#define DUNE_COMMON_TEST_TESTSUITE_HH
+
+#include <iostream>
+#include <sstream>
+#include <string>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/test/collectorstream.hh>
+
+
+
+namespace Dune {
+
+
+
+ /**
+ * \brief A simple helper class to organize your test suite
+ *
+ * Usage: Construct a TestSuite and call check() or require()
+ * with the condition to check and optionally a name for this check.
+ * These methods return a stream so that you can pipe in an
+ * explanation accompanied by respective data to give a reason
+ * for a test failure.
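+ *
+ * A minimal usage sketch (illustrative only):
+ * \code
+ * int main() {
+ *   Dune::TestSuite suite("example");
+ *   suite.check(1+1==2, "addition") << "1+1 should evaluate to 2";
+ *   suite.require(2*2==4, "multiplication") << "2*2 should evaluate to 4";
+ *   return suite.exit();
+ * }
+ * \endcode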
+ */
+ class TestSuite
+ {
+ public:
+ enum ThrowPolicy
+ {
+ AlwaysThrow,
+ ThrowOnRequired
+ };
+
+ /**
+ * \brief Create TestSuite
+ *
+ * \param name A name to identify this TestSuite. Defaults to "".
+ * \param policy If AlwaysThrow, any failing check will throw; otherwise only failing required checks will throw.
+ */
+ TestSuite(ThrowPolicy policy, std::string name="") :
+ name_(name),
+ checks_(0),
+ failedChecks_(0),
+ throwPolicy_(policy==AlwaysThrow)
+ {}
+
+ /**
+ * \brief Create TestSuite
+ *
+ * \param name A name to identify this TestSuite. Defaults to "".
+ * \param policy If AlwaysThrow, any failing check will throw; otherwise only failing required checks will throw. Defaults to ThrowOnRequired
+ */
+ TestSuite(std::string name="", ThrowPolicy policy=ThrowOnRequired) :
+ name_(name),
+ checks_(0),
+ failedChecks_(0),
+ throwPolicy_(policy==AlwaysThrow)
+ {}
+
+ /**
+ * \brief Check condition
+ *
+ * This will throw an exception if the check fails and if the AlwaysThrow policy was used on creation.
+ *
+ * \param condition The condition to check; the failure counter is increased if it is false.
+ * \param name A name to identify this check. Defaults to ""
+ * \returns A CollectorStream that can be used to create a diagnostic message to be printed on failure.
+ */
+ CollectorStream check(bool condition, std::string name="")
+ {
+ ++checks_;
+ if (not condition)
+ ++failedChecks_;
+
+ return CollectorStream([condition, name, this](std::string reason) {
+ if (not condition)
+ this->announceCheckResult(throwPolicy_, "CHECK ", name, reason);
+ });
+ }
+
+ /**
+ * \brief Check a required condition
+ *
+ * This will always throw an exception if the check fails.
+ *
+ * \param condition The condition to check; the failure counter is increased if it is false.
+ * \param name A name to identify this check. Defaults to ""
+ * \returns A CollectorStream that can be used to create a diagnostic message to be printed on failure.
+ */
+ CollectorStream require(bool condition, std::string name="")
+ {
+ ++checks_;
+ if (not condition)
+ ++failedChecks_;
+
+ return CollectorStream([condition, name, this](std::string reason) {
+ if (not condition)
+ this->announceCheckResult(true, "REQUIRED CHECK", name, reason);
+ });
+ }
+
+ /**
+ * \brief Collect data from a sub-TestSuite
+ *
+ * This will incorporate the accumulated results of the sub-TestSuite
+ * into this one. If the sub-TestSuite failed, i.e., contained failed
+ * checks, a summary will be printed.
+ */
+ void subTest(const TestSuite& subTest)
+ {
+ checks_ += subTest.checks_;
+ failedChecks_ += subTest.failedChecks_;
+
+ if (not subTest)
+ announceCheckResult(throwPolicy_, "SUBTEST", subTest.name(), std::to_string(subTest.failedChecks_)+"/"+std::to_string(subTest.checks_) + " checks failed in this subtest.");
+ }
+
+ /**
+ * \brief Check if this TestSuite failed
+ *
+ * \returns False if any of the executed tests failed, otherwise true.
+ */
+ explicit operator bool () const
+ {
+ return (failedChecks_==0);
+ }
+
+ /**
+ * \brief Query name
+ *
+ * \returns Name of this TestSuite
+ */
+ std::string name() const
+ {
+ return name_;
+ }
+
+ /**
+ * \brief Print a summary of this TestSuite
+ *
+ * \returns False if any of the executed tests failed, otherwise true.
+ */
+ bool report() const
+ {
+ if (failedChecks_>0)
+ std::cout << composeMessage("TEST ", name(), std::to_string(failedChecks_)+"/"+std::to_string(checks_) + " checks failed in this test.") << std::endl;
+ return (failedChecks_==0);
+ }
+
+ /**
+ * \brief Exit the test.
+ *
+ * This will print a summary of the test and return an integer
+ * to be used on program exit.
+ *
+ * \returns 1 if any of the executed tests failed, otherwise 0.
+ */
+ int exit() const
+ {
+ return (report() ? 0: 1);
+ }
+
+ protected:
+
+ // Compose a diagnostic message
+ static std::string composeMessage(std::string type, std::string name, std::string reason)
+ {
+ std::ostringstream s;
+ s << type << " FAILED";
+ if (name!="")
+ s << "(" << name << ")";
+ s << ": ";
+ if (reason!="")
+ s << reason;
+ return s.str();
+ }
+
+ // Announce check results. To be called on failed checks
+ static void announceCheckResult(bool throwException, std::string type, std::string name, std::string reason)
+ {
+ std::string message = composeMessage(type, name, reason);
+ std::cout << message << std::endl;
+ if (throwException)
+ {
+ Dune::Exception ex;
+ ex.message(message);
+ throw ex;
+ }
+ }
+
+ std::string name_;
+ std::size_t checks_;
+ std::size_t failedChecks_;
+ bool throwPolicy_;
+ };
+
+
+
+} // namespace Dune
+
+
+
+#endif // DUNE_COMMON_TEST_TESTSUITE_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <iostream>
+#include <fstream>
+#include <dune/common/fvector.hh>
+#include <dune/common/timer.hh>
+#include <dune/istl/bvector.hh>
+#include <dune/common/iteratorfacades.hh>
+
+template<int bs, int sz>
+void timing_vector()
+{
+ std::cout << "timing_vector<" << bs << ", " << sz << ">\n";
+ typedef Dune::FieldVector<double,bs> VB;
+ typedef Dune::BlockVector<VB> BV;
+ typedef Dune::BlockVector<BV> BBV;
+ BV bv1(sz), bv2(sz);
+ BV bv3(sz), bv4(sz);
+ bv1 = 1;
+ bv2 = 0;
+ bv2[1][0]=1;
+ bv2[1][1]=2;
+
+ bv3 = 0;
+ bv4 = 0;
+
+ BBV bbv(2);
+ bbv[0].resize(bv1.N());
+ bbv[1].resize(bv2.N());
+
+ BBV bbv2(2);
+#warning deep copy is broken!
+ /* bbv2 = bbv; */
+ bbv2[0] = bv3;
+ bbv2[1] = bv4;
+ // bbv2 = 0;
+
+ Dune::Timer stopwatch;
+ stopwatch.reset();
+ for (int i=0; i<10; i++)
+ {
+#ifdef DUNE_EXPRESSIONTEMPLATES
+#ifdef DUNE_FLATIT
+ for (int a=0; a<2; a++)
+ for (int b=0; b<sz; b++)
+ for (int c=0; c<bs; c++)
+ bbv2[a][b][c] += 2*bbv[a][b][c];
+#else
+ bbv2 += 2*bbv;
+#endif
+#else
+ bbv2.axpy(2,bbv);
+#endif
+ }
+ std::cout << "Time [bbv2.axpy(2,bbv)] " << stopwatch.elapsed() << std::endl;
+}
+
+#if 0
+//template<int BlockSize, int N, int M>
+template<int BN, int BM, int N, int M>
+void timing_matrix()
+{
+ std::cout << "timing_matrix<" << BN << ", " << BM << ", "
+ << N << ", " << M << ">\n";
+
+ typedef double matvec_t;
+ typedef Dune::FieldVector<matvec_t,BN> LVB;
+ typedef Dune::FieldVector<matvec_t,BM> VB;
+ typedef Dune::FieldMatrix<matvec_t,BN,BM> MB;
+ typedef Dune::BlockVector<LVB> LeftVector;
+ typedef Dune::BlockVector<VB> Vector;
+ typedef Dune::BCRSMatrix<MB> Matrix;
+
+ Matrix A(N,M,Matrix::row_wise);
+ typename Matrix::CreateIterator i=A.createbegin();
+ typename Matrix::CreateIterator end=A.createend();
+ std::cout << "Building matrix structure\n";
+ // build up the matrix structure
+ int c=0;
+ for (; i!=end; ++i)
+ {
+ // insert a non zero entry for myself
+ i.insert(c);
+ // insert index M-1
+ i.insert(M-1);
+ c++;
+ }
+ std::cout << "...done\n";
+
+ LeftVector v(N);
+ v = 0;
+ Vector x(M);
+ x = 1;
+
+ Dune::Timer stopwatch;
+ stopwatch.reset();
+#ifdef DUNE_EXPRESSIONTEMPLATES
+ v += A * x;
+#else
+ A.umv(x,v);
+#endif
+ std::cout << "Time [v+=A*x] " << stopwatch.elapsed() << std::endl;
+
+ std::cout << std::endl;
+}
+#endif
+
+int main ()
+{
+#ifdef DUNE_EXPRESSIONTEMPLATES
+#ifdef DUNE_FLATIT
+ std::cout << "Handwritten loops\n";
+#else
+ std::cout << "Expression Templates\n";
+#endif
+#else
+ std::cout << "Template Meta Program\n";
+#endif
+
+ timing_vector<1,1000000>();
+ timing_vector<2,500000>();
+ timing_vector<10,100000>();
+ timing_vector<40,25000>();
+ timing_vector<100,10000>();
+ timing_vector<400,2500>();
+
+ // timing_matrix<150,150,500,4000>();
+ // timing_matrix<150,150,1000,2000>();
+ // timing_matrix<1,18,400000,500000>();
+ // timing_matrix<6,3,400000,500000>();
+ // timing_matrix<3,6,400000,500000>();
+ // timing_matrix<18,1,400000,500000>();
+ // timing_matrix<50,50,9000,10000>();
+
+ std::cout << std::endl;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include "config.h"
+
+#include <type_traits>
+#include <string>
+#include <cstddef>
+
+#include <dune/common/transpose.hh>
+#include <dune/common/fmatrix.hh>
+#include <dune/common/diagonalmatrix.hh>
+#include <dune/common/classname.hh>
+#include <dune/common/test/testsuite.hh>
+
+
+
+// check A*transpose(B)
+template<class A, class B>
+auto checkAxBT(const A& a, const B&b)
+{
+ Dune::TestSuite suite(std::string{"Check transpose with A="} + Dune::className<A>() + " and B=" + Dune::className<B>());
+
+ // compute abt
+ auto abt = a * transpose(b);
+
+ // check result type
+ using FieldA = typename Dune::FieldTraits<A>::field_type;
+ using FieldB = typename Dune::FieldTraits<B>::field_type;
+ using Field = typename Dune::PromotionTraits<FieldA, FieldB>::PromotedType;
+ using ABT = Dune::FieldMatrix<Field,A::rows, B::rows>;
+ suite.check(std::is_same<decltype(abt), ABT>())
+ << "Result type of A*transpose(B) should be " << Dune::className<ABT>() << " but is " << Dune::className<decltype(abt)>();
+
+ // manually compute result value
+ auto abt_check = ABT{};
+ for(std::size_t i=0; i<A::rows; ++i)
+ for(std::size_t j=0; j<B::rows; ++j)
+ for(auto&& [b_jk, k] : Dune::sparseRange(b[j]))
+ abt_check[i][j] += a[i][k]*b_jk;
+
+ // check result value
+ bool equal = true;
+ for(std::size_t i=0; i<A::rows; ++i)
+ for(std::size_t j=0; j<B::rows; ++j)
+ equal = equal and (abt_check[i][j] == abt[i][j]);
+ suite.check(equal)
+ << "Result of A*transpose(B) should be \n" << abt_check << " but is \n" << abt;
+
+ return suite;
+}
+
+
+int main()
+{
+ Dune::TestSuite suite;
+
+ // fill dense matrix with test data
+ auto testFillDense = [](auto& matrix) {
+ std::size_t k=0;
+ for(std::size_t i=0; i<matrix.N(); ++i)
+ for(std::size_t j=0; j<matrix.M(); ++j)
+ matrix[i][j] = k++;
+ };
+
+ {
+ auto a = Dune::FieldMatrix<double,3,4>{};
+ auto b = Dune::FieldMatrix<double,7,4>{};
+ testFillDense(a);
+ testFillDense(b);
+ suite.subTest(checkAxBT(a,b));
+ }
+
+ {
+ auto a = Dune::FieldMatrix<double,1,2>{};
+ auto b = Dune::FieldMatrix<double,3,2>{};
+ testFillDense(a);
+ testFillDense(b);
+ suite.subTest(checkAxBT(a,b));
+ }
+
+ {
+ auto a = Dune::FieldMatrix<double,1,2>{};
+ auto b = Dune::FieldMatrix<double,1,2>{};
+ testFillDense(a);
+ testFillDense(b);
+ suite.subTest(checkAxBT(a,b));
+ }
+
+ {
+ auto a = Dune::FieldMatrix<double,3,4>{};
+ auto b = Dune::DiagonalMatrix<double,4>{};
+ testFillDense(a);
+ b = {0, 1, 2, 3};
+ suite.subTest(checkAxBT(a,b));
+ }
+
+ return suite.exit();
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <cmath>
+#include <cstddef>
+#include <iostream>
+#include <tuple>
+
+#include <dune/common/tupleutility.hh>
+
+//////////////////////////////////////////////////////////////////////
+//
+// check FirstTypeIndex
+//
+typedef std::tuple<int, unsigned, double> MyTuple;
+static_assert((Dune::FirstTypeIndex<MyTuple, int>::value == 0),
+ "FirstTypeIndex finds the wrong index for double in MyTuple!");
+static_assert((Dune::FirstTypeIndex<MyTuple, unsigned>::value == 1),
+ "FirstTypeIndex finds the wrong index for double in MyTuple!");
+static_assert((Dune::FirstTypeIndex<MyTuple, double>::value == 2),
+ "FirstTypeIndex finds the wrong index for double in MyTuple!");
+
+
+
+//////////////////////////////////////////////////////////////////////
+//
+// check PushBackTuple
+typedef Dune::PushBackTuple<MyTuple, char>::type MyTupleAppended1;
+typedef std::tuple<int, unsigned, double, char> MyTupleAppended2;
+static_assert((std::is_same<MyTupleAppended1, MyTupleAppended2>::value),
+ "PushBackTuple failed!");
+
+
+
+//////////////////////////////////////////////////////////////////////
+//
+// check PushFrontTuple
+typedef Dune::PushFrontTuple<MyTuple, char>::type MyTuplePrepended1;
+typedef std::tuple<char, int, unsigned, double> MyTuplePrepended2;
+static_assert((std::is_same<MyTuplePrepended1, MyTuplePrepended2>::value),
+ "PushFrontTuple failed!");
+
+
+
+//////////////////////////////////////////////////////////////////////
+//
+// check JoinTuples
+typedef Dune::JoinTuples<MyTuple, MyTuple>::type MyTupleMyTuple1;
+typedef std::tuple<int, unsigned, double, int, unsigned, double> MyTupleMyTuple2;
+static_assert((std::is_same<MyTupleMyTuple1, MyTupleMyTuple2>::value),
+ "JoinTuples failed!");
+
+
+
+//////////////////////////////////////////////////////////////////////
+//
+// check FlattenTuple
+typedef std::tuple<char, float> MyTuple2;
+typedef std::tuple<MyTuple, MyTuple2> MyTupleTuple;
+typedef Dune::FlattenTuple<MyTupleTuple>::type MyTupleTupleFlat1;
+typedef std::tuple<int, unsigned, double, char, float> MyTupleTupleFlat2;
+static_assert((std::is_same<MyTupleTupleFlat1, MyTupleTupleFlat2>::value),
+ "FlattenTuples failed!");
+
+
+
+//////////////////////////////////////////////////////////////////////
+//
+// check nested ReduceTuple with a little TMP
+
+// A tuple of a range of integers wrapped in integral_constant types
+template<int start, int end>
+struct Range
+{
+ typedef typename Dune::PushBackTuple<
+ typename Range<start, end-1>::type,
+ typename std::integral_constant<int,end-1>
+ >::type type;
+};
+
+template<int start>
+struct Range<start, start>
+{
+ typedef std::tuple<> type;
+};
+
+// An accumulator to build up a list of divisors of an integer using reduce
+template<class Data, class PotentialDivisor>
+struct DivisorAccumulator
+{
+ enum {value = Data::first_type::value};
+ enum {isDivisor = (PotentialDivisor::value*(value / PotentialDivisor::value)==value)};
+
+ typedef typename Data::second_type OldTuple;
+ typedef typename Dune::PushBackTuple<OldTuple, PotentialDivisor>::type ExtendedTuple;
+ typedef typename std::conditional<isDivisor, ExtendedTuple, OldTuple>::type NewTuple;
+
+ typedef typename std::pair<typename Data::first_type, NewTuple> type;
+};
+
+// Construct list of divisors using reduce
+template<int X>
+struct Divisors
+{
+ typedef typename Dune::ReduceTuple<
+ DivisorAccumulator,
+ typename Range<1,X+1>::type,
+ typename std::pair<typename std::integral_constant<int, X>, typename std::tuple<> >
+ >::type::second_type type;
+
+ enum {value = std::tuple_size<type>::value};
+};
+
+// An accumulator to build up a list of primes up to a fixed integer
+template<class Data, class N>
+struct PrimeAccumulator
+{
+ enum {isPrime = (Divisors<N::value>::value==2)};
+
+ typedef typename std::conditional<isPrime, typename Dune::PushBackTuple<Data, N>::type, Data>::type type;
+};
+
+// Construct the list of primes using reduce
+template<int X>
+struct Primes
+{
+ typedef typename Dune::ReduceTuple<
+ PrimeAccumulator,
+ typename Range<1,X+1>::type,
+ typename std::tuple<>
+ >::type type;
+};
+
+typedef Primes<9>::type Primes1;
+typedef std::tuple<
+ std::integral_constant<int, 2>,
+ std::integral_constant<int, 3>,
+ std::integral_constant<int, 5>,
+ std::integral_constant<int, 7> > Primes2;
+static_assert((std::is_same<Primes1, Primes2>::value),
+ "ReduceTuple failed in primes-tmp!");
+
+struct Reciprocal
+{
+ template<class>
+ struct TypeEvaluator
+ {
+ typedef double Type;
+ };
+ template<class T>
+ typename TypeEvaluator<T>::Type operator()(const T& val) const {
+ return 1./val;
+ }
+};
+
+int main()
+{
+ const std::tuple<int, double> t1(1, 2.);
+ auto t2 = Dune::genericTransformTuple(t1, Reciprocal());
+ static_assert(std::is_same<decltype(t2), std::tuple<double, double>>::value,
+ "Type after genericTransformTuple does not match!");
+ if(fabs(std::get<0>(t2)-1.) > 1e-8 ||
+ fabs(std::get<1>(t2)-.5) > 1e-8)
+ {
+ std::cout << "genericTransformTuple gives wrong result!\n";
+ std::abort();
+ }
+
+ auto t3 = Dune::applyPartial([&] (auto&&... x) {
+ return std::make_tuple((1./x)...);
+ }, t1, std::make_index_sequence<2>());
+
+ if(t2 != t3)
+ {
+ std::cout << "genericTransformTuple gives wrong result!\n";
+ std::abort();
+ }
+
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <tuple>
+#include <type_traits>
+#include <typeindex>
+#include <typeinfo>
+#include <utility>
+#include <vector>
+
+#include <dune/common/hybridutilities.hh>
+#include <dune/common/test/testsuite.hh>
+#include <dune/common/typelist.hh>
+#include <dune/common/typetraits.hh>
+
+template<class... T>
+int isTypeListByOverload(const Dune::TypeList<T...> *);
+
+template<class T, class = void>
+struct IsTypeListByOverload : std::false_type {};
+
+template<class T>
+struct IsTypeListByOverload
+ <T, std::void_t<decltype(isTypeListByOverload(std::declval<const T*>()))> >
+: std::true_type {};
+
+template<class T>
+struct IsTypeListBySpecialization : std::false_type {};
+
+template<class... T>
+struct IsTypeListBySpecialization<Dune::TypeList<T...> > : std::true_type {};
+
+template<class TL>
+void staticLiteralTests() {
+ // check default-constructible
+ TL tl1;
+ TL tl2{};
+
+ // check copy construction
+ TL tl3 = tl1;
+ TL tl4(tl1);
+
+ // check move construction
+ TL tl5 = std::move(tl1);
+ TL tl6(std::move(tl2));
+
+ // check copying
+ tl1 = tl5;
+ tl2 = { tl1 };
+
+ // check moving
+ tl3 = std::move(tl1);
+ tl4 = { std::move(tl2) };
+
+ // check literal type requirement
+ constexpr TL tl7{};
+
+ // specialization/overload resolution tests
+ static_assert(IsTypeListBySpecialization<TL>::value,
+ "TypeList cannot be recongized by class specialization");
+ static_assert(IsTypeListByOverload<TL>::value,
+ "TypeList cannot be recongized by function overload resolution");
+
+ // avoid compiler warnings
+ (void)tl1;
+ (void)tl2;
+ (void)tl3;
+ (void)tl4;
+ (void)tl5;
+ (void)tl6;
+ (void)tl7;
+
+ // destructor checked on scope exit
+}
+
+static constexpr struct {} skipOverloadTest{};
+
+template<class T>
+void checkNonTypeList(decltype(skipOverloadTest))
+{
+ // make sure IsTypeList and IsEmptyTypeList reject non-typelists
+ static_assert(!Dune::IsTypeList<T>::value,
+ "IsTypeList accepts non-TypeList");
+ static_assert(!Dune::IsEmptyTypeList<T>::value,
+ "IsEmptyTypeList accepts non-TypeList");
+
+ // specialization tests
+ static_assert(!IsTypeListBySpecialization<T>::value,
+ "Non-TypeList recongized as TypeList by class specialization");
+}
+
+template<class T>
+void checkNonTypeList()
+{
+ checkNonTypeList<T>(skipOverloadTest);
+
+ // overload resolution tests
+ static_assert(!IsTypeListByOverload<T>::value,
+ "Non-TypeList recongized as TypeList by function overload resolution");
+}
+
+void staticTests()
+{
+ {
+ using TL = Dune::TypeList<>;
+ static_assert(Dune::IsTypeList<TL>::value,
+ "TypeList not recognized by IsTypeList");
+ static_assert(Dune::IsEmptyTypeList<TL>::value,
+ "Empty TypeList not recognized by IsEmptyTypeList");
+ static_assert(Dune::TypeListSize<TL>::value == 0,
+ "Incorrect result of TypeListeSize");
+
+ staticLiteralTests<TL>();
+ }
+
+ {
+ using TL = Dune::TypeList<void>;
+ static_assert(Dune::IsTypeList<TL>::value,
+ "TypeList not recognized by IsTypeList");
+ static_assert(!Dune::IsEmptyTypeList<TL>::value,
+ "Nonempty TypeList declared empty by IsEmptyTypeList");
+ static_assert(Dune::TypeListSize<TL>::value == 1,
+ "Incorrect result of TypeListeSize");
+ static_assert(std::is_same<typename Dune::TypeListElement<0, TL>::type,
+ void>::value,
+ "TypeListElement returns wrong type");
+ static_assert(std::is_same<Dune::TypeListEntry_t<0, TL>, void>::value,
+ "TypeListEntry_t returns wrong type");
+
+ staticLiteralTests<TL>();
+ }
+
+ {
+ using TL = Dune::TypeList<const int, int[10], int(int, int)>;
+ static_assert(Dune::IsTypeList<TL>::value,
+ "TypeList not recognized by IsTypeList");
+ static_assert(!Dune::IsEmptyTypeList<TL>::value,
+ "Nonempty TypeList declared empty by IsEmptyTypeList");
+ static_assert(Dune::TypeListSize<TL>::value == 3,
+ "Incorrect result of TypeListeSize");
+
+ static_assert(std::is_same<typename Dune::TypeListElement<0, TL>::type,
+ const int>::value,
+ "TypeListElement returns wrong type");
+ static_assert(std::is_same<Dune::TypeListEntry_t<0, TL>, const int>::value,
+ "TypeListEntry_t returns wrong type");
+
+ static_assert(std::is_same<typename Dune::TypeListElement<1, TL>::type,
+ int[10]>::value,
+ "TypeListElement returns wrong type");
+ static_assert(std::is_same<Dune::TypeListEntry_t<1, TL>, int[10]>::value,
+ "TypeListEntry_t returns wrong type");
+
+ static_assert(std::is_same<typename Dune::TypeListElement<2, TL>::type,
+ int(int, int)>::value,
+ "TypeListElement returns wrong type");
+ static_assert(std::is_same<Dune::TypeListEntry_t<2, TL>,
+ int(int, int)>::value,
+ "TypeListEntry_t returns wrong type");
+
+ staticLiteralTests<TL>();
+ }
+
+ // make sure IsTypeList and IsEmptyTypeList reject non-typelists
+ checkNonTypeList<void>();
+ checkNonTypeList<int>();
+ // don't check tuple<>, that may actually be an implementation of TypeList<>
+ checkNonTypeList<std::tuple<int> >();
+ // `tuple<void>` is a complete, but noninstantiable type. Attempting to use
+ // an object of type `tuple<void>` as an argument to a function call
+// necessitates instantiation -- which is illegal even in an SFINAE context.
+ // The instantiation is necessary to check for conversions (via conversion
+ // operators and base classes) during overload resolution. Even if the
+ // signature of the function is of the form `f(const Expr<T>*)` for some
+ // template parameter `T` and we call it as `f(declval<tuple<void>*>())` the
+ // base classes `tuple<void>` must be determined to figure out whether
+ // `tuple<void>*` can be converted to `Expr<T>*`.
+ checkNonTypeList<std::tuple<void> >(skipOverloadTest);
+}
+
+struct NonConstructible {
+ NonConstructible() = delete;
+};
+
+template<class TypeList>
+auto getTypeInfos(TypeList typeList)
+{
+ using namespace Dune::Hybrid;
+
+ std::vector<std::type_index> result;
+ forEach(typeList, [&](auto metaType) {
+ using type = typename decltype(metaType)::type;
+ result.emplace_back(typeid (type));
+ });
+ return result;
+}
+
+template<class TypeList>
+auto getMetaTypeInfos(TypeList typeList)
+{
+ using namespace Dune::Hybrid;
+
+ std::vector<std::type_index> result;
+ // The parens around `forEach` are needed to suppress ADL here to avoid
+ // instantiation attempts for the member types of typeList
+ (forEach)(typeList, [&](auto metaType) {
+ result.emplace_back(typeid (metaType));
+ });
+ return result;
+}
+
+int main()
+{
+ staticTests();
+
+ Dune::TestSuite test;
+
+ auto typeList = Dune::TypeList<void, NonConstructible, int>{};
+ auto expectedTypeInfoList = std::vector<std::type_index>{
+ typeid (void), typeid (NonConstructible), typeid (int)
+ };
+ test.check(getTypeInfos(typeList) == expectedTypeInfoList)
+ << "Iterating over TypeList yields unexpected type information";
+
+ // This test also ensures that the type passed to the lambda in the
+ // `forEach()` is indeed an instance of `MetaType`
+ auto metaTypeList =
+ Dune::TypeList<void, NonConstructible, std::tuple<void> >{};
+ auto expectedMetaTypeInfoList = std::vector<std::type_index>{
+ typeid (Dune::MetaType<void>),
+ typeid (Dune::MetaType<NonConstructible>),
+ typeid (Dune::MetaType<std::tuple<void> >),
+ };
+ // parens around `getMetaTypeInfos` needed to suppress ADL
+ test.check((getMetaTypeInfos)(metaTypeList) == expectedMetaTypeInfoList)
+ << "Iterating over TypeList yields unexpected MetaTypes";
+
+ return test.exit();
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <type_traits>
+
+#include <dune/common/typeutilities.hh>
+
+//////////////////////////////////////////////////////////////////////
+//
+// check disableCopyMove
+//
+
+struct Foo {
+
+ template< class ...Args, Dune::disableCopyMove< Foo, Args ... > = 0 >
+ Foo( Args&& ... )
+ {}
+
+ Foo( const Foo& ) = delete;
+ Foo( Foo&& ) = delete;
+};
+
+static_assert( std::is_default_constructible< Foo >::value, "Foo is not default constructible." );
+static_assert( not std::is_copy_constructible< Foo >::value, "Foo is copy constructible." );
+static_assert( not std::is_move_constructible< Foo >::value, "Foo is move constructible." );
+
+int main()
+{}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <iostream>
+#include <tuple>
+
+#include <dune/common/typetraits.hh>
+#include <dune/common/tupleutility.hh>
+
+template<class T>
+struct Eval
+{
+ typedef void* Type;
+};
+
+int main(int, char**)
+{
+ typedef std::tuple<int*,double*,long*,char*> PointerTuple;
+ PointerTuple pointers = Dune::NullPointerInitialiser<PointerTuple>::apply();
+
+ int ret=0;
+
+ if(std::get<0>(pointers)!=nullptr) {
+ std::cerr<<"First pointer not null"<<std::endl;
+ ret=1;
+ }
+ if(std::get<1>(pointers)!=nullptr) {
+ std::cerr<<"Second pointer not null"<<std::endl;
+ ret=2;
+ }
+
+ if(std::get<2>(pointers)!=nullptr) {
+ std::cerr<<"Third pointer not null"<<std::endl;
+ ret=3;
+ }
+
+ if(std::get<3>(pointers)!=nullptr) {
+ std::cerr<<"Fourth pointer not null"<<std::endl;
+ ret=4;
+ }
+
+ int i = 3;
+ long l = 4;
+ char c = 's';
+
+ typedef std::tuple<int,char,long,char> Tuple1;
+ typedef std::tuple<int&,char&,long&,char&> RefTuple1;
+ typedef std::tuple<int*,char*,long*,char*> PointerTuple1;
+ static_assert((std::is_same<PointerTuple1,
+ Dune::ForEachType<Dune::AddPtrTypeEvaluator,
+ RefTuple1>::Type>::value),
+ "RefTuple1 with added pointers should be the same as "
+ "PointerTuple1, but it isn't!");
+
+ Tuple1 t1(i,c,l,c);
+ RefTuple1 refs(i, c, l, c);
+
+ [[maybe_unused]] RefTuple1 refs2(Dune::transformTuple<Dune::AddRefTypeEvaluator>(t1));
+ PointerTuple1 pointers1(Dune::transformTuple<Dune::AddPtrTypeEvaluator>(refs));
+ if(&i != std::get<0>(pointers1) || &c != std::get<1>(pointers1) ||
+ &l != std::get<2>(pointers1) || &c != std::get<3>(pointers1)) {
+ std::cerr << "utilitytest: error: incorrect pointers in pointers1"
+ << std::endl;
+ ret = 1;
+ }
+
+ if(Dune::At<2>::get(pointers)!=std::get<1>(pointers)) {
+ ret+=10;
+ std::cerr<<"at inconsistent!"<<std::endl;
+ }
+
+ PointerTuple1 p(new int(), new char(), new long(), new char());
+
+ [[maybe_unused]] typedef Dune::ForEachType<Eval,PointerTuple1>::Type ConvertedType;
+ Dune::PointerPairDeletor<PointerTuple1>::apply(p);
+ if(p != PointerTuple1(nullptr,nullptr,nullptr,nullptr)){
+ ret+=20;
+ std::cerr<<"PointerPairDeletor not working!"<<std::endl;
+ }
+
+ return ret;
+}
--- /dev/null
+#include "config.h"
+
+#if !HAVE_VC
+#error Inconsistent buildsystem. This program should not be built in the \
+ absence of Vc.
+#endif
+
+#include <cstdlib>
+#include <map>
+#include <iostream>
+#include <string>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/vc.hh>
+
+const std::map<Vc::Implementation, std::string> impl_names = {
+ {Vc::ScalarImpl, "Scalar" },
+ {Vc::SSE2Impl, "SSE2" },
+ {Vc::SSE3Impl, "SSE3" },
+ {Vc::SSSE3Impl, "SSSE3" },
+ {Vc::SSE41Impl, "SSE41" },
+ {Vc::SSE42Impl, "SSE42" },
+ {Vc::AVXImpl, "AVX" },
+ {Vc::AVX2Impl, "AVX2" },
+ {Vc::MICImpl, "MIC" },
+};
+
+const std::string expected_var = "DUNE_TEST_EXPECTED_VC_IMPLEMENTATION";
+
+int main()
+{
+
+ auto p = impl_names.find(Vc::CurrentImplementation::current());
+ if(p == impl_names.end())
+ DUNE_THROW(Dune::NotImplemented, "Unexpected current implementation value "
+ << Vc::CurrentImplementation::current());
+ auto current_impl = p->second;
+
+ std::cout << "The current Vc implementation is " << current_impl
+ << std::endl;
+
+ std::string expected_impl;
+ if(auto env_impl = std::getenv(expected_var.c_str()))
+ expected_impl = env_impl;
+
+ if(expected_impl.empty())
+ {
+    std::cerr << "No expected Vc implementation provided, skipping test\n"
+ << "Please set " << expected_var
+ << " environment variable to one of the following values:";
+ for(const auto &item : impl_names)
+ std::cerr << ' ' << item.second;
+ std::cerr << std::endl;
+ return 77;
+ }
+
+ std::cout << "The expected Vc implementation is " << expected_impl
+ << std::endl;
+
+ if(current_impl == expected_impl) {
+ std::cout << "OK: Current and expected Vc implementation match"
+ << std::endl;
+ return 0;
+ }
+ else {
+ std::cout << "Error: Current Vc implementation does not match expected"
+ << std::endl;
+ return 1;
+ }
+
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+// DUNE_MODTEST_VERSION 3.2.1
+#define DUNE_MODTEST_VERSION_MAJOR 3
+#define DUNE_MODTEST_VERSION_MINOR 2
+#define DUNE_MODTEST_VERSION_REVISION 1
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <iostream>
+#include <dune/common/version.hh>
+
+// test 3.3 >= 3.2
+#if DUNE_VERSION_GTE(DUNE_MODTEST, 3, 3)
+#error "3.2 >= 3.3"
+#endif
+
+// test 3.2 >= 3.1
+#if DUNE_VERSION_LTE(DUNE_MODTEST, 3, 1)
+#error "3.2 <= 3.1"
+#endif
+
+// test 3.2 == 3.2
+#if DUNE_VERSION_GT(DUNE_MODTEST, 3, 2)
+ #error "3.2 > 3.2"
+#elif DUNE_VERSION_LT(DUNE_MODTEST, 3, 2)
+ #error "3.2 < 3.2"
+#else
+ #if ! DUNE_VERSION_EQUAL(DUNE_MODTEST, 3, 2)
+ #error "3.2 != 3.2"
+ #endif
+#endif
+
+// test 3.2.2 >= 3.2.1
+#if DUNE_VERSION_GTE_REV(DUNE_MODTEST, 3, 2, 2)
+#error "3.2.1 >= 3.2.2"
+#endif
+
+// test 3.2.1 >= 3.2.0
+#if DUNE_VERSION_LTE_REV(DUNE_MODTEST, 3, 2, 0)
+#error "3.2.1 <= 3.2.0"
+#endif
+
+// test 3.2.1 == 3.2.1
+#if DUNE_VERSION_GT_REV(DUNE_MODTEST, 3, 2, 1)
+ #error "3.2.1 > 3.2.1"
+#elif DUNE_VERSION_LT_REV(DUNE_MODTEST, 3, 2, 1)
+ #error "3.2.1 < 3.2.1"
+#else
+ #if ! DUNE_VERSION_EQUAL_REV(DUNE_MODTEST, 3, 2, 1)
+ #error "3.2.1 != 3.2.1"
+ #endif
+#endif
+
+int main()
+{
+ return 0;
+}
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_TIMER_HH
+#define DUNE_TIMER_HH
+
+#ifndef TIMER_USE_STD_CLOCK
+// headers for std::chrono
+#include <chrono>
+#else
+// headers for std::clock
+#include <ctime>
+#endif
+
+namespace Dune {
+
+ /** @addtogroup Common
+ @{
+ */
+
+ /*! \file
+ \brief A simple timing class.
+ */
+
+
+ /** \brief A simple stop watch
+
+ This class reports the elapsed user-time, i.e. time spent computing,
+ after the last call to Timer::reset(). The results are seconds and
+ fractional seconds. Note that the resolution of the timing depends
+     on your OS kernel which should be somewhere in the millisecond range.
+
+     The class is basically a wrapper around std::chrono::high_resolution_clock
+     (or std::clock when TIMER_USE_STD_CLOCK is defined).
+
+ \warning In a multi-threading situation, this class does NOT return wall-time!
+ Instead, the run time for all threads will be added up.
+ For example, if you have four threads running in parallel taking one second each,
+ then the Timer class will return an elapsed time of four seconds.
+
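+     A minimal usage sketch (illustrative only; doSomething() stands for arbitrary user code):
+     \code
+     Dune::Timer watch;            // starts counting immediately
+     doSomething();
+     std::cout << watch.elapsed() << " seconds" << std::endl;
+     watch.stop();                 // freeze the measurement
+     watch.reset();                // clear the accumulated time
+     \endcode
+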
+ */
+ class Timer
+ {
+ public:
+
+ /** \brief A new timer, create and reset
+ *
+ * \param startImmediately If true (default) the timer starts counting immediately
+ */
+ Timer (bool startImmediately=true) noexcept
+ {
+ isRunning_ = startImmediately;
+ reset();
+ }
+
+ //! Reset timer while keeping the running/stopped state
+ void reset() noexcept
+ {
+ sumElapsed_ = 0.0;
+ storedLastElapsed_ = 0.0;
+ rawReset();
+ }
+
+
+ //! Start the timer and continue measurement if it is not running. Otherwise do nothing.
+ void start() noexcept
+ {
+ if (not (isRunning_))
+ {
+ rawReset();
+ isRunning_ = true;
+ }
+ }
+
+
+ //! Get elapsed user-time from last reset until now/last stop in seconds.
+ double elapsed () const noexcept
+ {
+ // if timer is running add the time elapsed since last start to sum
+ if (isRunning_)
+ return sumElapsed_ + lastElapsed();
+
+ return sumElapsed_;
+ }
+
+
+ //! Get elapsed user-time from last start until now/last stop in seconds.
+ double lastElapsed () const noexcept
+ {
+ // if timer is running return the current value
+ if (isRunning_)
+ return rawElapsed();
+
+ // if timer is not running return stored value from last run
+ return storedLastElapsed_;
+ }
+
+
+ //! Stop the timer and return elapsed().
+ double stop() noexcept
+ {
+ if (isRunning_)
+ {
+ // update storedLastElapsed_ and sumElapsed_ and stop timer
+ storedLastElapsed_ = lastElapsed();
+ sumElapsed_ += storedLastElapsed_;
+ isRunning_ = false;
+ }
+ return elapsed();
+ }
+
+
+ private:
+
+ bool isRunning_;
+ double sumElapsed_;
+ double storedLastElapsed_;
+
+
+#ifdef TIMER_USE_STD_CLOCK
+ void rawReset() noexcept
+ {
+ cstart = std::clock();
+ }
+
+ double rawElapsed () const noexcept
+ {
+ return (std::clock()-cstart) / static_cast<double>(CLOCKS_PER_SEC);
+ }
+
+ std::clock_t cstart;
+#else
+ void rawReset() noexcept
+ {
+ cstart = std::chrono::high_resolution_clock::now();
+ }
+
+ double rawElapsed () const noexcept
+ {
+ std::chrono::high_resolution_clock::time_point now = std::chrono::high_resolution_clock::now();
+ std::chrono::duration<double> time_span = std::chrono::duration_cast<std::chrono::duration<double> >(now - cstart);
+ return time_span.count();
+ }
+
+ std::chrono::high_resolution_clock::time_point cstart;
+#endif
+ }; // end class Timer
+
+ /** @} end documentation */
+
+} // end namespace
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_TO_UNIQUE_PTR_HH
+#define DUNE_TO_UNIQUE_PTR_HH
+
+#warning to_unique_ptr.hh and ToUniquePtr are deprecated. Use std::unique_ptr or std::shared_ptr instead.
+
+#include <memory>
+
+namespace Dune
+{
+ /// \brief Alias for `std::unique_ptr` introduced as transition wrapper.
+ /// \deprecated
+ template <class T>
+ using ToUniquePtr [[deprecated]] = std::unique_ptr<T>;
+
+ /// \brief Alias for `std::make_unique` introduced as transition wrapper.
+ /// \deprecated
+ template <class T, class... Args>
+ [[deprecated]] std::unique_ptr<T> makeToUnique (Args&&... args)
+ {
+    return std::make_unique<T>(std::forward<Args>(args)...);
+ }
+
+} // end namespace Dune
+
+#endif // DUNE_TO_UNIQUE_PTR_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_TRANSPOSE_HH
+#define DUNE_COMMON_TRANSPOSE_HH
+
+#include <cstddef>
+
+#include <dune/common/fmatrix.hh>
+#include <dune/common/promotiontraits.hh>
+
+namespace Dune {
+
+namespace Impl {
+
+ // Wrapper representing the transposed of a matrix.
+ // Creating the wrapper does not compute anything
+ // but only serves for tagging the wrapped matrix
+ // for transposition.
+ template<class M>
+ class TransposedMatrixWrapper
+ {
+ public:
+
+ enum {
+ //! The number of rows.
+ rows = M::cols,
+ //! The number of columns.
+ cols = M::rows
+ };
+
+ TransposedMatrixWrapper(const M& matrix) : matrix_(matrix) {}
+ TransposedMatrixWrapper(const TransposedMatrixWrapper&) = delete;
+ TransposedMatrixWrapper(TransposedMatrixWrapper&&) = delete;
+
+ template<class OtherField, int otherRows>
+ friend auto operator* (const FieldMatrix<OtherField, otherRows, rows>& matrixA,
+ const TransposedMatrixWrapper& matrixB)
+ {
+ using ThisField = typename FieldTraits<M>::field_type;
+ using Field = typename PromotionTraits<ThisField, OtherField>::PromotedType;
+ FieldMatrix<Field, otherRows, cols> result;
+ for (std::size_t j=0; j<otherRows; ++j)
+ matrixB.matrix_.mv(matrixA[j], result[j]);
+ return result;
+ }
+
+ private:
+
+ const M& matrix_;
+ };
+
+} // namespace Impl
+
+/**
+ * \brief Create a wrapper modelling the transposed matrix
+ *
+ * Currently the wrapper only implements
+ * \code
+ * auto c = a*transpose(b);
+ * \endcode
+ * if a is a FieldMatrix of appropriate size. This is
+ * optimal even for sparse b because it only relies on
+ * calling b.mv(a[i], c[i]) for the rows of a.
+ *
+ * Since the created object only stores a reference
+ * to the wrapped matrix, it cannot be modified and
+ * should not be stored but used directly.
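+ *
+ * As a concrete sketch (matrix sizes chosen arbitrarily for illustration):
+ * \code
+ * Dune::FieldMatrix<double,2,3> a;   // 2x3
+ * Dune::FieldMatrix<double,4,3> b;   // 4x3
+ * auto c = a * transpose(b);         // c is a FieldMatrix<double,2,4>
+ * \endcode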
+ */
+template<class Matrix>
+auto transpose(const Matrix& matrix) {
+ return Impl::TransposedMatrixWrapper<Matrix>(matrix);
+}
+
+
+} // namespace Dune
+
+#endif // DUNE_COMMON_TRANSPOSE_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_TUPLE_UTILITY_HH
+#define DUNE_TUPLE_UTILITY_HH
+
+#include <cstddef>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include <dune/common/hybridutilities.hh>
+#include <dune/common/typetraits.hh>
+
+namespace Dune {
+
+ /** @addtogroup TupleUtilities
+ *
+ * @{
+ */
+
+ /**
+ * @file
+ * @brief Contains utility classes which can be used with std::tuple.
+ */
+
+ /**
+ * \brief Apply function with arguments from a given tuple
+ *
+ * \param f A callable object
+ * \param args Tuple containing the arguments
+ * \param indices Indices to arguments in tuple as std::integer_sequence
+ *
+ * This will call the function with arguments generated by unpacking those
+   * entries of the tuple whose indices appear in the given integer_sequence.
+ *
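+   * A minimal sketch (values chosen arbitrarily): pass only the first and
+   * third entry of a tuple to a callable.
+   * \code
+   * auto sum  = [](int a, int b) { return a + b; };
+   * auto args = std::make_tuple(1, 2.0, 3);
+   * int s = applyPartial(sum, args, std::integer_sequence<std::size_t, 0, 2>{}); // s == 4
+   * \endcode
+   *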
+ * \ingroup Utility
+ */
+ template<class F, class ArgTuple, class I, I... i>
+ decltype(auto) applyPartial(F&& f, ArgTuple&& args, std::integer_sequence<I, i...> /*indices*/)
+ {
+ return f(std::get<i>(args)...);
+ }
+
+ template<class T>
+ struct TupleAccessTraits
+ {
+ typedef typename std::add_const<T>::type& ConstType;
+ typedef T& NonConstType;
+ typedef const typename std::remove_const<T>::type& ParameterType;
+ };
+
+ template<class T>
+ struct TupleAccessTraits<T*>
+ {
+ typedef typename std::add_const<T>::type* ConstType;
+ typedef T* NonConstType;
+ typedef T* ParameterType;
+ };
+
+ template<class T>
+ struct TupleAccessTraits<T&>
+ {
+ typedef T& ConstType;
+ typedef T& NonConstType;
+ typedef T& ParameterType;
+ };
+
+ /**
+ * @brief A helper template that initializes a std::tuple consisting of pointers
+ * to nullptr.
+ *
+ * A std::tuple of nullptr may be useful when you use a std::tuple of pointers
+ * in a class which you can only initialise in a later stage.
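+   *
+   * A short sketch (types chosen for illustration):
+   * \code
+   * using Pointers = std::tuple<int*, double*>;
+   * Pointers p = NullPointerInitialiser<Pointers>::apply(); // all entries are nullptr
+   * \endcode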
+ */
+ template<class T>
+ struct NullPointerInitialiser;
+
+ template<class... Args>
+ struct NullPointerInitialiser<std::tuple<Args...> >
+ {
+ typedef std::tuple<Args...> ResultType;
+ static ResultType apply()
+ {
+ return ResultType(static_cast<Args>(nullptr)...);
+ }
+ };
+
+ /**
+ * @brief Helper template to clone the type definition of a std::tuple with the
+ * storage types replaced by a user-defined rule.
+ *
+ * Suppose all storage types A_i in a std::tuple define a type A_i::B. You can
+ * build up a pair consisting of the types defined by A_i::B in the following
+ * way:
+ *
+ * \code
+ * template <class A>
+ * struct MyEvaluator
+ * {
+ * typedef typename A::B Type;
+ * };
+ *
+ * typedef ForEachType<MyEvaluator, ATuple>::Type BTuple;
+ * \endcode
+ *
+ * Here, MyEvaluator is a helper struct that extracts the correct type from
+ * the storage types of the tuple defined by the tuple ATuple.
+ *
+ * \sa AddRefTypeEvaluator, AddPtrTypeEvaluator, genericTransformTuple(),
+ * and transformTuple().
+ */
+ template<template <class> class TE, class T>
+ struct ForEachType;
+
+ template<template <class> class TE, class... Args>
+ struct ForEachType<TE, std::tuple<Args...> >
+ {
+ typedef std::tuple<typename TE<Args>::Type...> Type;
+ };
+
+#ifndef DOXYGEN
+ template<class Tuple, class Functor, std::size_t... I>
+ inline auto genericTransformTupleBackendImpl(Tuple& t, Functor& f, const std::index_sequence<I...>& )
+ -> std::tuple<decltype(f(std::get<I>(t)))...>
+ {
+ return std::tuple<decltype(f(std::get<I>(t)))...>(f(std::get<I>(t))...);
+ }
+
+ template<class... Args, class Functor>
+ auto genericTransformTupleBackend(std::tuple<Args...>& t, Functor& f) ->
+ decltype(genericTransformTupleBackendImpl(t, f,std::index_sequence_for<Args...>{}))
+ {
+ return genericTransformTupleBackendImpl(t, f,std::index_sequence_for<Args...>{});
+ }
+
+ template<class... Args, class Functor>
+ auto genericTransformTupleBackend(const std::tuple<Args...>& t, Functor& f) ->
+ decltype(genericTransformTupleBackendImpl(t, f, std::index_sequence_for<Args...>{}))
+ {
+ return genericTransformTupleBackendImpl(t, f, std::index_sequence_for<Args...>{});
+ }
+#endif
+
+ /**
+ * This function does for the value of a std::tuple what ForEachType does for the
+ * type of a std::tuple: it transforms the value using a user-provided policy
+ * functor.
+ *
+ * \param t The std::tuple value to transform.
+ * \param f The functor to use to transform the values.
+ *
+ * The functor should have the following form:
+ *
+ * \code
+ * struct Functor
+ * {
+ * template<class>
+ * struct TypeEvaluator
+ * {
+ * typedef user-defined Type;
+ * };
+ *
+ * template<class T>
+ * typename TypeEvaluator<T>::Type operator()(T& val);
+ *
+ * template<class T>
+ * typename TypeEvaluator<T>::Type operator()(T& val) const;
+ *
+ * template<class T>
+ * typename TypeEvaluator<T>::Type operator()(const T& val);
+ *
+ * template<class T>
+ * typename TypeEvaluator<T>::Type operator()(const T& val) const;
+ * };
+ * \endcode
+ *
+ * The member class template \c TypeEvaluator should be a class template
+ * suitable as the \c TypeEvaluator template parameter for ForEachType. The
+ * function call operator \c operator() is used to transform the value; only
+ * the signatures of \c operator() which are actually used must be present.
+ */
+ template<class Tuple, class Functor>
+ auto genericTransformTuple(Tuple&& t, Functor&& f) ->
+ decltype(genericTransformTupleBackend(t, f))
+ {
+ return genericTransformTupleBackend(t, f);
+ }
+
+ /**
+ * \tparam TE TypeEvaluator class template.
+   * \tparam Args Types of the extra arguments to pass to \c TE<T>::apply().
+ *
+ * This class stores references to a number of arguments it receives in the
+ * constructor. Later, its function call operator \c operator() may be
+ * called with a parameter \c t of type \c T. \c operator() will then call
+ * the static method \c TE<T>::apply(t,args...), where \c args... is the
+ * sequence of arguments the object was constructed with. \c operator()
+ * will convert the result to type \c TE<T>::Type and return it.
+ *
+ * \c TE should be an extended version of the \c TypeEvaluator class
+ * template parameter of ForEachType, for instance:
+ *
+ * \code
+ * template <class T>
+ * struct TypeEvaluator
+ * {
+ * typedef T* Type;
+ * static Type apply(T& t, void* a0)
+ * {
+ * return t ? &t : static_cast<T*>(a0);
+ * }
+ * };
+ * \endcode
+ *
+   * In this example the value transformation takes a reference to a value of
+   * type T and returns a pointer to that value, unless the value evaluates to
+   * false in a boolean context; in that case it returns the pointer passed as
+   * the extra argument instead.
+ */
+ template<template<class> class TE, class... Args>
+ class TransformTupleFunctor
+ {
+ mutable std::tuple<Args&...> tup;
+
+ template<class T, std::size_t... I>
+    inline auto apply(T&& t, const std::index_sequence<I...>& ) const ->
+      decltype(TE<T>::apply(t,std::get<I>(tup)...))
+ {
+ return TE<T>::apply(t,std::get<I>(tup)...);
+ }
+
+ public:
+ template<class T>
+ struct TypeEvaluator : public TE<T>
+ {};
+
+ TransformTupleFunctor(Args&&... args)
+ : tup(args...)
+ { }
+
+ template<class T>
+    inline auto operator()(T&& t) const ->
+      decltype(this->apply(t,std::index_sequence_for<Args...>{}))
+ {
+ return apply(t,std::index_sequence_for<Args...>{});
+ }
+ };
+
+ template<template<class> class TE, class... Args>
+ TransformTupleFunctor<TE, Args...> makeTransformTupleFunctor(Args&&... args)
+ {
+ return TransformTupleFunctor<TE, Args...>(args...);
+ }
+
+ /**
+ * This function provides functionality similar to genericTransformTuple(),
+ * although less general and closer in spirit to ForEachType.
+ *
+ * \tparam TypeEvaluator Used as the \c TE template argument to
+ * TransformTupleFunctor internally.
+ * \tparam Tuple Type of the std::tuple to transform.
+ * \tparam Args Types of extra argument to call the transformation
+ * function with.
+ *
+ * \param orig Tuple value to be transformed.
+ * \param args Extra arguments values to provide to the transformation
+ * function.
+ *
+ * The \c TypeEvaluator class template should be suitable as the \c TE
+ * template argument for TransformTupleFunctor. It has the following form
+ * (an extension of the \c TypeEvaluator template argument of ForEachType):
+ *
+ * \code
+ * template <class T>
+ * struct TypeEvaluator
+ * {
+ * typedef UserDefined Type;
+ *
+ * template<class... Args>
+ * static Type apply(T& t, Args&... args);
+ * };
+ * \endcode
+ *
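+   * For example, a small sketch using the predefined AddPtrTypeEvaluator:
+   * \code
+   * int i = 0; double d = 1.0;
+   * auto values   = std::tie(i, d);                              // std::tuple<int&, double&>
+   * auto pointers = transformTuple<AddPtrTypeEvaluator>(values); // std::tuple<int*, double*>
+   * \endcode
+   *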
+   * \sa genericTransformTuple(), ForEachType, AddRefTypeEvaluator, and
+ * AddPtrTypeEvaluator.
+ */
+ template<template<class> class TypeEvaluator, class Tuple, class... Args>
+ auto transformTuple(Tuple&& orig, Args&&... args) ->
+ decltype(genericTransformTuple(orig, makeTransformTupleFunctor<TypeEvaluator>(args...)))
+ {
+ return genericTransformTuple(orig, makeTransformTupleFunctor<TypeEvaluator>(args...));
+ }
+
+ //! \c TypeEvaluator to turn a type \c T into a reference to \c T
+ /**
+ * This is suitable as the \c TypeEvaluator template parameter for
+ * ForEachType and transformTuple().
+ */
+ template<class T>
+ struct AddRefTypeEvaluator
+ {
+ typedef T& Type;
+ static Type apply(T& t)
+ {
+ return t;
+ }
+ };
+
+ //! \c TypeEvaluator to turn a type \c T into a pointer to \c T
+ /**
+ * This is suitable as the \c TypeEvaluator template parameter for
+ * ForEachType and transformTuple().
+ */
+ template<class T>
+ struct AddPtrTypeEvaluator
+ {
+ typedef typename std::remove_reference<T>::type* Type;
+ static Type apply(T& t)
+ {
+ return &t;
+ }
+ };
+
+ // Specialization, in case the type is already a reference
+ template<class T>
+ struct AddPtrTypeEvaluator<T&>
+ {
+ typedef typename std::remove_reference<T>::type* Type;
+ static Type apply(T& t)
+ {
+ return &t;
+ }
+ };
+
+ /**
+ * @brief Type for reverse element access.
+ *
+ * Counterpart to ElementType for reverse element access.
+ */
+ template<int N, class Tuple>
+ struct AtType
+ {
+ typedef typename std::tuple_element<std::tuple_size<Tuple>::value - N - 1, Tuple>::type Type;
+ };
+
+ /**
+ * @brief Reverse element access.
+ *
+ * While Element<...> gives you the arguments beginning at the front of a
+ * std::tuple, At<...> starts at the end, which may be more convenient, depending
+ * on how you built your std::tuple.
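+   *
+   * For example (a minimal sketch):
+   * \code
+   * std::tuple<int, char, long> t(1, 'a', 2L);
+   * auto& last  = At<0>::get(t); // the long entry, 2L
+   * auto& first = At<2>::get(t); // the int entry, 1
+   * \endcode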
+ */
+ template<int N>
+ struct At
+ {
+ template<typename Tuple>
+ static typename TupleAccessTraits<typename AtType<N, Tuple>::Type>::NonConstType
+ get(Tuple& t)
+ {
+ return std::get<std::tuple_size<Tuple>::value - N - 1>(t);
+ }
+
+ template<typename Tuple>
+ static typename TupleAccessTraits<typename AtType<N, Tuple>::Type>::ConstType
+ get(const Tuple& t)
+ {
+ return std::get<std::tuple_size<Tuple>::value - N - 1>(t);
+ }
+ };
+
+ /**
+ * @brief Deletes all objects pointed to in a std::tuple of pointers.
+ */
+ template<class Tuple>
+ struct PointerPairDeletor
+ {
+ template<typename... Ts>
+ static void apply(std::tuple<Ts...>& t)
+ {
+ Hybrid::forEach(t,[&](auto&& ti){delete ti; ti=nullptr;});
+ }
+ };
+
+ /**
+ * @brief Finding the index of a certain type in a std::tuple
+ *
+ * \tparam Tuple The std::tuple type to search in.
+ * \tparam Predicate Predicate which tells FirstPredicateIndex which types
+ * in Tuple to accept. This should be a class template
+ * taking a single type template argument. When
+ * instantiated, it should contain a static member
+ * constant \c value which should be convertible to bool.
+ * A type is accepted if \c value is \c true, otherwise it
+ * is rejected and the next type is tried. Look at IsType
+ * for a sample implementation.
+ * \tparam start First index to try. This can be adjusted to skip
+ * leading tuple elements.
+ * \tparam size This parameter is an implementation detail and should
+ * not be adjusted by the users of this class. It should
+ * always be equal to the size of the std::tuple.
+ *
+ * This class can search for a type in std::tuple. It will apply the predicate
+ * to each type in std::tuple in turn, and set its member constant \c value to
+ * the index of the first type that was accepted by the predicate. If none
+ * of the types are accepted by the predicate, a static_assert is triggered.
+ */
+ template<class Tuple, template<class> class Predicate, std::size_t start = 0,
+ std::size_t size = std::tuple_size<Tuple>::value>
+ class FirstPredicateIndex :
+ public std::conditional<Predicate<typename std::tuple_element<start,
+ Tuple>::type>::value,
+ std::integral_constant<std::size_t, start>,
+ FirstPredicateIndex<Tuple, Predicate, start+1> >::type
+ {
+ static_assert(std::tuple_size<Tuple>::value == size, "The \"size\" "
+ "template parameter of FirstPredicateIndex is an "
+ "implementation detail and should never be set "
+ "explicitly!");
+ };
+
+#ifndef DOXYGEN
+ template<class Tuple, template<class> class Predicate, std::size_t size>
+ class FirstPredicateIndex<Tuple, Predicate, size, size>
+ {
+ static_assert(AlwaysFalse<Tuple>::value, "None of the std::tuple element "
+ "types matches the predicate!");
+ };
+#endif // !DOXYGEN
+
+ /**
+ * @brief Generator for predicates accepting one particular type
+ *
+ * \tparam T The type to accept.
+ *
+ * The generated predicate class is useful together with
+ * FirstPredicateIndex. It will accept exactly the type that is given as
+ * the \c T template parameter.
+ */
+ template<class T>
+ struct IsType
+ {
+ //! @brief The actual predicate
+ template<class U>
+ struct Predicate : public std::is_same<T, U> {};
+ };
+
+ /**
+ * @brief Find the first occurrence of a type in a std::tuple
+ *
+ * \tparam Tuple The std::tuple type to search in.
+ * \tparam T Type to search for.
+ * \tparam start First index to try. This can be adjusted to skip leading
+ * std::tuple elements.
+ *
+ * This class can search for a particular type in std::tuple. It will check each
+ * type in the std::tuple in turn, and set its member constant \c value to the
+   * index of the first occurrence of that type. If the type was not
+ * found, a static_assert is triggered.
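+   *
+   * A brief sketch:
+   * \code
+   * using T = std::tuple<char, int, double>;
+   * static_assert(FirstTypeIndex<T, int>::value == 1, "int is the second entry");
+   * \endcode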
+ */
+ template<class Tuple, class T, std::size_t start = 0>
+ struct FirstTypeIndex :
+ public FirstPredicateIndex<Tuple, IsType<T>::template Predicate, start>
+ { };
+
+ /**
+ * \brief Helper template to append a type to a std::tuple
+ *
+ * \tparam Tuple The std::tuple type to extend
+ * \tparam T The type to be appended to the std::tuple
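+   *
+   * For example (illustrative only):
+   * \code
+   * using T  = std::tuple<int, char>;
+   * using T2 = typename PushBackTuple<T, double>::type; // std::tuple<int, char, double>
+   * \endcode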
+ */
+ template<class Tuple, class T>
+ struct PushBackTuple;
+
+ template<class... Args, class T>
+ struct PushBackTuple<typename std::tuple<Args...>, T>
+ {
+ typedef typename std::tuple<Args..., T> type;
+ };
+
+ /**
+ * \brief Helper template to prepend a type to a std::tuple
+ *
+ * \tparam Tuple The std::tuple type to extend
+ * \tparam T The type to be prepended to the std::tuple
+ */
+ template<class Tuple, class T>
+ struct PushFrontTuple;
+
+ template<class... Args, class T>
+ struct PushFrontTuple<typename std::tuple<Args...>, T>
+ {
+ typedef typename std::tuple<T, Args...> type;
+ };
+
+ /**
+ * \brief Apply reduce with meta binary function to template
+ *
+ * For a tuple\<T0,T1,...,TN-1,TN,...\> the exported result is
+ *
+ * F\< ... F\< F\< F\<Seed,T0\>\::type, T1\>\::type, T2\>\::type, ... TN-1\>\::type
+ *
+ * \tparam F Binary meta function
+ * \tparam Tuple Apply reduce operation to this std::tuple
+ * \tparam Seed Initial value for reduce operation
+ * \tparam N Reduce the first N std::tuple elements
+ */
+ template<
+ template <class, class> class F,
+ class Tuple,
+ class Seed=std::tuple<>,
+ int N=std::tuple_size<Tuple>::value>
+ struct ReduceTuple
+ {
+ typedef typename ReduceTuple<F, Tuple, Seed, N-1>::type Accumulated;
+ typedef typename std::tuple_element<N-1, Tuple>::type Value;
+
+ //! Result of the reduce operation
+ typedef typename F<Accumulated, Value>::type type;
+ };
+
+ /**
+ * \brief Apply reduce with meta binary function to template
+ *
+ * Specialization for reduction of 0 elements.
+ * The exported result type is Seed.
+ *
+ * \tparam F Binary meta function
+ * \tparam Tuple Apply reduce operation to this std::tuple
+ * \tparam Seed Initial value for reduce operation
+ */
+ template<
+ template <class, class> class F,
+ class Tuple,
+ class Seed>
+ struct ReduceTuple<F, Tuple, Seed, 0>
+ {
+ //! Result of the reduce operation
+ typedef Seed type;
+ };
+
+ /**
+ * \brief Join two std::tuple's
+ *
+ * For Head=std::tuple<T0,...,TN> and Tail=std::tuple<S0,...,SM>
+ * the exported result is std::tuple<T0,..,TN,S0,...,SM>.
+ *
+ * \tparam Head Head of resulting std::tuple
+ * \tparam Tail Tail of resulting std::tuple
+ */
+ template<class Head, class Tail>
+ struct JoinTuples
+ {
+ //! Result of the join operation
+ typedef typename ReduceTuple<PushBackTuple, Tail, Head>::type type;
+ };
+
+ /**
+ * \brief Flatten a std::tuple of std::tuple's
+ *
+ * This flattens a std::tuple of tuples std::tuple<std::tuple<T0,...,TN>, std::tuple<S0,...,SM> >
+ * and exports std::tuple<T0,..,TN,S0,...,SM>.
+ *
+   * \tparam Tuple A std::tuple of std::tuples
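+   *
+   * For example (a minimal sketch):
+   * \code
+   * using TT   = std::tuple<std::tuple<int, char>, std::tuple<double> >;
+   * using Flat = typename FlattenTuple<TT>::type; // std::tuple<int, char, double>
+   * \endcode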
+ */
+ template<class Tuple>
+ struct FlattenTuple
+ {
+ //! Result of the flatten operation
+ typedef typename ReduceTuple<JoinTuples, Tuple>::type type;
+ };
+
+  /** @} */
+}
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_TUPLEVECTOR_HH
+#define DUNE_COMMON_TUPLEVECTOR_HH
+
+#include <tuple>
+#include <utility>
+
+#include <dune/common/indices.hh>
+#include <dune/common/typetraits.hh>
+#include <dune/common/std/type_traits.hh>
+
+
+
+/**
+ * \file
+ * \brief Provides the TupleVector class that augments std::tuple by operator[]
+ * \author Carsten Gräser
+ */
+
+namespace Dune
+{
+
+
+
+/**
+ * \brief A class augmenting std::tuple by element access via operator[]
+ *
+ * \ingroup Utilities
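+ *
+ * A small usage sketch (values are arbitrary; the index constants `_0`, `_1`, ...
+ * are assumed to come from dune/common/indices.hh):
+ * \code
+ * auto v = makeTupleVector(1, 2.0, 'a');
+ * auto x = v[Dune::Indices::_1];   // the double entry, 2.0
+ * \endcode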
+ */
+template<class... T>
+class TupleVector : public std::tuple<T...>
+{
+ using Base = std::tuple<T...>;
+
+ template<class... TT>
+ using TupleConstructorDetector = decltype(Base(std::declval<TT&&>()...));
+
+ template<class... TT>
+ using hasTupleConstructor = Dune::Std::is_detected<TupleConstructorDetector, TT...>;
+
+
+public:
+
+ /** \brief Construct from a set of arguments
+ *
+ * This is only available if you can construct
+ * the underlying std::tuple from the same argument
+ * list.
+ */
+ template<class... TT,
+ std::enable_if_t<hasTupleConstructor<TT...>::value, int> = 0>
+ constexpr TupleVector(TT&&... tt) :
+ Base(std::forward<TT>(tt)...)
+ {}
+
+ /** \brief Default constructor
+ */
+ constexpr TupleVector()
+ {}
+
+ /** \brief Const access to the tuple elements
+ */
+ template<std::size_t i>
+ constexpr decltype(auto) operator[](const Dune::index_constant<i>&) const
+ {
+ return std::get<i>(*this);
+ }
+
+ /** \brief Non-const access to the tuple elements
+ */
+ template<std::size_t i>
+ decltype(auto) operator[](const Dune::index_constant<i>&)
+ {
+ return std::get<i>(*this);
+ }
+
+ /** \brief Number of elements of the tuple */
+ static constexpr std::size_t size()
+ {
+ return std::tuple_size<Base>::value;
+ }
+};
+
+
+
+template<class... T>
+constexpr auto makeTupleVector(T&&... t)
+{
+  // The std::decay_t<T> is a slight simplification,
+ // because std::reference_wrapper needs special care.
+ return TupleVector<std::decay_t<T>...>(std::forward<T>(t)...);
+}
+
+
+
+} // namespace Dune
+
+#endif // DUNE_COMMON_TUPLEVECTOR_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_TYPELIST_HH
+#define DUNE_COMMON_TYPELIST_HH
+
+#include <type_traits>
+#include <tuple>
+#include <utility>
+
+namespace Dune {
+
+ /**
+ * \brief A type that refers to another type
+ *
+ * \ingroup TypeUtilities
+ *
+ * The referred-to type can be accessed using the contained typedef `type`
+ * or, if you have a `MetaType` object by using the dereferencing operator.
+ *
+ * MetaType<T> is an empty literal class. Objects of type `MetaType<T>` can
+ * still be used even if `T` is incomplete or non-constructible. They can
+   * even be used if `T` is complete but non-instantiable
+ * (e.g. `std::tuple<void>`), although you need to be extra careful to avoid
+ * attempts to instantiate the template parameter `T` due to
+ * argument-dependent lookup (ADL).
+ *
+ * Objects of type `MetaType` are passed to the generic lambda when
+ * iterating over a `TypeList` using `Hybrid::forEach()`.
+ */
+ template<class T>
+ struct MetaType {
+ //! The referred-to type
+ using type = T;
+ };
+
+ /**
+ * \brief A simple type list
+ *
+ * \ingroup TypeUtilities
+ *
+ * The purpose of this is to encapsulate a list of types.
+ * This allows, e.g., to pack an argument-pack into one type.
+ * In contrast to a std::tuple a TypeList can be created
+ * without creating any object of the stored types.
+ *
+ * This can, e.g., be used for overload resolution
+ * with tag-dispatch where TypeList is used as tag.
+ * In combination with PriorityTag this allows to emulate
+ * partial specialization of function templates in
+ * a sane way, i.e., without the hassle of classic
+ * specialization of function templates
+ *
+ * A `TypeList<T...>` can be iterated over using `Hybrid::forEach()`. For
+ * the purpose of iterating with `Hybrid::forEach()`, the members of
+ * `TypeList<T...>{}` are `MetaType<T>{}...`. This allows iteration over
+ * incomplete and non-constructible types, since no attempt is made to
+ * create objects of those types:
+ * \code
+ * using namespace Hybrid;
+ * struct NonConstructible { NonConstructible() = delete; };
+ * forEach(TypeList<void, NonConstructible, int>{}, [] (auto metaType) {
+ * std::cout << className<typename decltype(metaType)::type>()
+ * << std::endl;
+ * });
+ * \endcode
+ *
+ * It is also possible to iterate over complete-but-non-instantiable types,
+ * e.g. `tuple<void>`. But to do so you need to suppress ADL in the
+   * invocation of `forEach()`, since ADL would try to instantiate complete
+ * types in the template argument list of `TypeList` in order to find the
+ * associated namespaces. To suppress ADL you can either use a qualified
+ * lookup:
+ * \code
+   * Hybrid::forEach(TypeList<std::tuple<void> >{},
+   *                 [] (auto metaType) { ... });
+ * \endcode
+ * or you can enclose the name `forEach` in parentheses:
+ * \code
+ * using namespace Hybrid;
+ * (forEach)(TypeList<std::tuple<void> >{}, [] (auto metaType) { ... });
+ * \endcode
+ */
+ template<class... T>
+ using TypeList = std::tuple<MetaType<T>...>;
+
+
+
+ /**
+ * \brief Check if given type is a TypeList
+ *
+ * \ingroup TypeUtilities
+ *
+ * The result of the check is encoded in the
+ * base class of type std::integral_constant<bool, result>.
+ */
+ template<class T>
+ struct IsTypeList : std::false_type {};
+
+ /**
+ * \copydoc IsTypeList
+ *
+ * \ingroup TypeUtilities
+ */
+ template<class... T>
+ struct IsTypeList<TypeList<T...> > : std::true_type {};
+
+
+
+ /**
+ * \brief Check if given type is an empty TypeList
+ *
+ * \ingroup TypeUtilities
+ *
+ * The result of the check is encoded in the
+ * base class of type std::integral_constant<bool, result>.
+ */
+ template<class T>
+ struct IsEmptyTypeList : std::is_same<T, TypeList<> > {};
+
+
+
+ template<class T>
+ struct TypeListSize {};
+
+ /**
+ * \brief Get size of TypeList
+ *
+ * \ingroup TypeUtilities
+ *
+   * The result is encoded in the base class of
+ * type std::integral_constant<std::size_t, result>.
+ */
+ template<class... T>
+ struct TypeListSize<TypeList<T...>> : std::integral_constant<std::size_t, sizeof...(T)> {};
+
+
+
+ template<std::size_t i, class T>
+ struct TypeListElement {};
+
+ /**
+ * \brief Get element of TypeList
+ *
+ * \ingroup TypeUtilities
+ */
+ template<std::size_t i, class... T>
+ struct TypeListElement<i, TypeList<T...>>
+ {
+ /**
+ * \brief Export type of i-th element in TypeList
+ *
+ * \todo Implement without using std::tuple.
+ */
+ using type = typename std::tuple_element<i, std::tuple<T...>>::type;
+
+ /**
+ * \brief Export type of i-th element in TypeList
+ *
+ * \todo Implement without using std::tuple.
+ */
+ using Type = type;
+ };
+
+ /**
+ * \brief Shortcut for TypeListElement<i, T>::type;
+ */
+ template<std::size_t i, class T>
+ using TypeListEntry_t = typename TypeListElement<i, T>::type;
+
+ namespace Impl {
+
+ template<template<class...> class Target, class ToDoList, class... Processed>
+ struct UniqueTypesHelper;
+
+ template<template<class...> class Target, class... Processed>
+ struct UniqueTypesHelper<Target, TypeList<>, Processed...>
+ {
+ using type = Target<Processed...>;
+ };
+
+ template<template<class...> class Target, class T0, class... T, class... Processed>
+ struct UniqueTypesHelper<Target, TypeList<T0, T...>, Processed...>
+ {
+ using type = std::conditional_t<
+ std::disjunction<std::is_same<T0, Processed>...>::value,
+ typename UniqueTypesHelper<Target, TypeList<T...>, Processed...>::type,
+ typename UniqueTypesHelper<Target, TypeList<T...>, T0, Processed...>::type>;
+ };
+
+ // Helper for unpacking Dune::TypeList
+ template<template<class...> class Target, class TL>
+ struct UnpackTypeList;
+
+ template<template<class...> class Target, class... T>
+ struct UnpackTypeList<Target, Dune::TypeList<T...>>
+ {
+ using type = Target<T...>;
+ };
+
+ } // namespace Impl
+
+ /** \brief Unpack Dune::TypeList
+ *
+ * For a given Dune::TypeList<T...> this is an alias for Target<T...>.
+ */
+ template<template<class...> class Target, class TL>
+ using UnpackTypeList_t = typename Impl::UnpackTypeList<Target, TL>::type;
+
+ /** \brief Remove duplicates from a list of types
+ *
+ * For a given list of types T... instantiate Target<S...>, where
+ * S... is generated by removing duplicate types from T... . This
+ * is useful for std::variant which does not like to be instantiated
+ * with duplicate types.
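+   *
+   * For instance (an illustrative sketch; the order of the resulting
+   * parameter pack is an implementation detail):
+   * \code
+   * // V is a std::variant over int and double with the duplicate int removed
+   * using V = UniqueTypes_t<std::variant, int, double, int>;
+   * \endcode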
+ */
+ template<template<class...> class Target, class... T>
+ using UniqueTypes_t = typename Impl::UniqueTypesHelper<Target, TypeList<T...>>::type;
+
+ /** \brief Remove duplicates from a Dune::TypeList
+ *
+ * For a given Dune::TypeList<T...> this is an alias for Dune::TypeList<S...>, where
+ * S... is generated by removing duplicate types from T... .
+ */
+ template<class NonUniqueTypeList>
+ using UniqueTypeList_t = typename Impl::UniqueTypesHelper<TypeList, NonUniqueTypeList>::type;
+
+ /** \brief Remove duplicates from a Dune::TypeList
+ *
+ * For a given Dune::TypeList<T...> this return a Dune::TypeList<S...>, where
+ * S... is generated by removing duplicate types from T... .
+ */
+ template<class... T>
+ constexpr auto uniqueTypeList(TypeList<T...> list)
+ {
+ return typename Impl::UniqueTypesHelper<TypeList, TypeList<T...>>::type{};
+ }
+
+
+
+} // namespace Dune
+
+#endif // DUNE_COMMON_TYPELIST_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_TYPETRAITS_HH
+#define DUNE_TYPETRAITS_HH
+
+#include <complex>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+namespace Dune
+{
+
+ namespace Impl
+ {
+ ///
+ /**
+ * @internal
+ * @brief Helper to make void_t work with gcc versions prior to gcc 5.0 and some clang versions.
+ */
+ template <class...>
+ struct voider
+ {
+ using type = void;
+ };
+ }
+
+ //! Is void for all valid input types. The workhorse for C++11 SFINAE-techniques.
+ /**
+ * \ingroup CxxUtilities
+ *
+   * Note that since C++17 there is also `std::void_t`, which should be preferred. But due to an issue
+   * in the C++ standard (CWG issue #1980, "Equivalent but not functionally-equivalent redeclarations")
+   * and a corresponding failure in some clang compilers, this tool is kept here as a workaround.
+ * Use it if you want to specialize multiple classes using `void_t`.
+ */
+ template <class... Types>
+ using void_t = typename Impl::voider<Types...>::type;
+
+ /**
+ * @file
+ * @brief Traits for type conversions and type information.
+ * @author Markus Blatt, Christian Engwer
+ */
+ /** @addtogroup CxxUtilities
+ *
+ * @{
+ */
+
+ /**
+ * @brief Just an empty class
+ */
+ struct Empty {};
+
+ /**
+ * @brief Checks whether two types are interoperable.
+ *
+ * Two types are interoperable if conversions in either directions
+ * exists.
+ */
+ template<class T1, class T2>
+ struct IsInteroperable
+ {
+ enum {
+ /**
+ * @brief True if either a conversion from T1 to T2 or vice versa
+ * exists.
+ */
+ value = std::is_convertible<T1,T2>::value || std::is_convertible<T2,T1>::value
+ };
+ };
+
+ /**
+ * @brief Enable typedef if two types are interoperable.
+ *
+ * (also see IsInteroperable)
+ */
+ template<class T1, class T2, class Type>
+ struct EnableIfInterOperable
+ : public std::enable_if<IsInteroperable<T1,T2>::value, Type>
+ {};
+
+ /**
+ \brief template which always yields a false value
+ \tparam T Some type. It should be a type expression involving template
+ parameters of the class or function using AlwaysFalse.
+
+ Suppose you have a template class. You want to document the required
+ members of this class in the non-specialized template, but you know that
+ actually instantiating the non-specialized template is an error. You
+ can try something like this:
+ \code
+ template<typename T>
+ struct Traits {
+ static_assert(false,
+                    "Instantiating this non-specialized template is an "
+ "error. You should use one of the specializations "
+ "instead.");
+ //! The type used to frobnicate T
+ typedef void FrobnicateType;
+ };
+ \endcode
+ This will trigger static_assert() as soon as the compiler reads the
+ definition for the Traits template, since it knows that "false" can
+ never become true, no matter what the template parameters of Traits are.
+ As a workaround you can use AlwaysFalse: replace <tt>false</tt> by
+ <tt>AlwaysFalse<T>::value</tt>, like this:
+ \code
+ template<typename T>
+ struct Traits {
+ static_assert(AlwaysFalse<T>::value,
+                    "Instantiating this non-specialized template is an "
+ "error. You should use one of the specializations "
+ "instead.");
+ //! The type used to frobnicate T
+ typedef void FrobnicateType;
+ };
+ \endcode
+     Since there might be a specialization of AlwaysFalse for template
+ parameter T, the compiler cannot trigger static_assert() until the
+ type of T is known, that is, until Traits<T> is instantiated.
+ */
+ template<typename T>
+ struct AlwaysFalse : public std::false_type {};
+
+ /**
+ \brief template which always yields a true value
+ \tparam T Some type. It should be a type expression involving template
+ parameters of the class or function using AlwaysTrue.
+
+ \note This class exists mostly for consistency with AlwaysFalse.
+ */
+ template<typename T>
+ struct AlwaysTrue : public std::true_type {};
+
+ /**
+ * \brief Check if a type is callable with ()-operator and given arguments
+ * \ingroup CxxUtilities
+ *
+ * \tparam D Function descriptor
+ * \tparam R Return value type
+ *
+ * If `D = F(Args...)` this checks if F can be called with an
+ * argument list of type `Args...`, and if the return value can
+ * be converted to R. If R is `void`, any return type is accepted.
+ *
+ * The result is encoded by deriving from
+ * either `std::true_type` or `std::false_type`
+ *
+ * If D is not of the form `F(Args...)` this class is not defined.
+ *
+   * \note This differs from `std::is_invocable_r` in that only
+   *       `FunctionObject` types are allowed here, while `std::is_invocable_r`
+ * also accepts pointers to member functions and pointers
+ * to data members (i.e. more general `Callable` types)
+ * \note See https://en.cppreference.com/w/cpp/named_req/FunctionObject
+ * for the description of the named requirement `FunctionObject`
+ * and https://en.cppreference.com/w/cpp/named_req/Callable
+ * for `Callable`.
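+   *
+   * A short sketch:
+   * \code
+   * auto f = [](int i) { return i + 1; };
+   * static_assert(IsCallable<decltype(f)(int), int>::value, "f maps int to int");
+   * static_assert(not IsCallable<decltype(f)(int*)>::value, "f cannot be called with a pointer");
+   * \endcode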
+ */
+ template<typename D, typename R = void>
+ struct IsCallable;
+
+ /**
+ * \brief Check if a type is callable with ()-operator and given arguments
+ * \ingroup CxxUtilities
+ */
+ template<typename R, typename F, typename... Args>
+ struct IsCallable<F(Args...), R>
+ : public std::bool_constant<
+ std::is_invocable_r_v<R, F, Args...>
+ && !std::is_member_pointer_v<std::decay_t<F>>
+ > {};
+
+ //! \brief Whether this type acts as a scalar in the context of
+ //! (hierarchically blocked) containers
+ /**
+ All types `T` for which `IsNumber<T>::value` is `true` will act as a
+ scalar when used with possibly hierarchically blocked containers, such as
+ `FieldMatrix`, `FieldVector`, `BCRSMatrix`, `BlockVector`,
+ `MultiTypeBlockVector`, etc. This enables earlier error reporting when
+ implementing binary container-scalar operators, such as `=` or `*=`.
+
+ By default is `true` for all arithmetic types (as per
+ `std::is_arithmetic`), and for `T=std::complex<U>`, iff
+ `IsNumber<U>::value` itself is `true`.
+
+ Should be specialized to `true` for e.g. extended precision types or
+ automatic differentiation types, or anything else that might sensibly be
+ an element of a matrix or vector.
+ */
+ template <typename T>
+ struct IsNumber
+ : public std::integral_constant<bool, std::is_arithmetic<T>::value> {
+ };
+
+#ifndef DOXYGEN
+
+ template <typename T>
+ struct IsNumber<std::complex<T>>
+ : public std::integral_constant<bool, IsNumber<T>::value> {
+ };
+
+#endif // DOXYGEN
+
+ //! \brief Whether this type has a value of NaN.
+ /**
+ * Internally, this is just a forward to `std::is_floating_point<T>`.
+ */
+ template <typename T>
+ struct HasNaN
+ : public std::integral_constant<bool, std::is_floating_point<T>::value> {
+ };
+
+#ifndef DOXYGEN
+
+ template <typename T>
+ struct HasNaN<std::complex<T>>
+ : public std::integral_constant<bool, std::is_floating_point<T>::value> {
+ };
+
+#endif // DOXYGEN
+
+#ifndef DOXYGEN
+
+ namespace Impl {
+
+ template<typename T, typename I, typename = int>
+ struct IsIndexable
+ : public std::false_type
+ {};
+
+ template<typename T, typename I>
+ struct IsIndexable<T,I,typename std::enable_if<(sizeof(std::declval<T>()[std::declval<I>()]) > 0),int>::type>
+ : public std::true_type
+ {};
+
+ }
+
+#endif // DOXYGEN
+
+ //! Type trait to determine whether an instance of T has an operator[](I), i.e. whether it can be indexed with an index of type I.
+ /**
+ * \warning Not all compilers support testing for arbitrary index types. In particular, there
+ * are problems with GCC 4.4 and 4.5.
+ */
+ template<typename T, typename I = std::size_t>
+ struct IsIndexable
+ : public Impl::IsIndexable<T,I>
+ {};
+
+#ifndef DOXYGEN
+
+ namespace Impl {
+ // This function does nothing.
+ // By passing expressions to this function one can avoid
+ // "value computed is not used" warnings that may show up
+ // in a comma expression.
+ template<class...T>
+ void ignore(T&&... /*t*/)
+ {}
+ }
+
+#endif // DOXYGEN
+
+ /**
+ \brief typetrait to check that a class has begin() and end() members
+ */
+ // default version, gets picked if SFINAE fails
+ template<typename T, typename = void>
+ struct IsIterable
+ : public std::false_type
+ {};
+
+#ifndef DOXYGEN
+ // version for types with begin() and end()
+ template<typename T>
+ struct IsIterable<T, decltype(Impl::ignore(
+ std::declval<T>().begin(),
+ std::declval<T>().end(),
+ std::declval<T>().begin() != std::declval<T>().end(),
+ decltype(std::declval<T>().begin()){std::declval<T>().end()},
+ ++(std::declval<std::add_lvalue_reference_t<decltype(std::declval<T>().begin())>>()),
+ *(std::declval<T>().begin())
+ ))>
+ : public std::true_type
+ {};
+#endif
+
+#ifndef DOXYGEN
+ // this is just a forward declaration
+ template <class> struct FieldTraits;
+#endif
+
+ //! Convenient access to FieldTraits<Type>::field_type.
+ template <class Type>
+ using field_t = typename FieldTraits<Type>::field_type;
+
+ //! Convenient access to FieldTraits<Type>::real_type.
+ template <class Type>
+ using real_t = typename FieldTraits<Type>::real_type;
+
+
+#ifndef DOXYGEN
+
+ // Implementation of IsTuple
+ namespace Impl {
+
+ template<class T>
+ struct IsTuple : public std::false_type
+ {};
+
+ template<class... T>
+ struct IsTuple<std::tuple<T...>> : public std::true_type
+ {};
+
+ } // namespace Impl
+
+#endif // DOXYGEN
+
+ /**
+ * \brief Check if T is a std::tuple<...>
+ *
+ * The result is exported by deriving from std::true_type or std::false_type.
+ */
+ template<class T>
+ struct IsTuple :
+ public Impl::IsTuple<T>
+ {};
+
+
+#ifndef DOXYGEN
+
+ // Implementation of IsTupleOrDerived
+ namespace Impl {
+
+ template<class... T, class Dummy>
+ std::true_type isTupleOrDerived(const std::tuple<T...>*, Dummy)
+ { return {}; }
+
+ template<class Dummy>
+ std::false_type isTupleOrDerived(const void*, Dummy)
+ { return {}; }
+
+ } // namespace Impl
+
+#endif // DOXYGEN
+
+ /**
+ * \brief Check if T derived from a std::tuple<...>
+ *
+ * The result is exported by deriving from std::true_type or std::false_type.
+ */
+ template<class T>
+ struct IsTupleOrDerived :
+ public decltype(Impl::isTupleOrDerived(std::declval<T*>(), true))
+ {};
+
+
+#ifndef DOXYGEN
+
+ // Implementation of is IsIntegralConstant
+ namespace Impl {
+
+ template<class T>
+ struct IsIntegralConstant : public std::false_type
+ {};
+
+ template<class T, T t>
+ struct IsIntegralConstant<std::integral_constant<T, t>> : public std::true_type
+ {};
+
+ } // namespace Impl
+
+#endif // DOXYGEN
+
+ /**
+ * \brief Check if T is an std::integral_constant<I, i>
+ *
+ * The result is exported by deriving from std::true_type or std::false_type.
+ */
+ template<class T>
+ struct IsIntegralConstant : public Impl::IsIntegralConstant<std::decay_t<T>>
+ {};
+
+
+
+ /**
+ * \brief Compute size of variadic type list
+ *
+ * \tparam T Variadic type list
+ *
+ * The ::value member gives the size of the variadic type list T...
+ * This should be equivalent to sizeof...(T). However, with clang
+ * the latter may produce wrong results if used in template aliases
+ * due to clang bug 14858 (https://llvm.org/bugs/show_bug.cgi?id=14858).
+ *
+ * As a workaround one can use SizeOf<T...>::value instead of sizeof...(T)
+ * in template aliases for any code that should work with clang < 3.8.
+ */
+ template<typename... T>
+ struct SizeOf
+ : public std::integral_constant<std::size_t,sizeof...(T)>
+ {};
+
+
+#ifndef DOXYGEN
+
+ namespace Impl {
+
+ template<class T, T...>
+ struct IntegerSequenceHelper;
+
+ // Helper struct to compute the i-th entry of a std::integer_sequence
+ //
+ // This could also be implemented using std::get<index>(std::make_tuple(t...)).
+ // However, the gcc-6 implementation of std::make_tuple increases the instantiation
+ // depth by 15 levels for each argument, such that the maximal instantiation depth
+ // is easily hit, especially with clang where it is set to 256.
+ template<class T, T head, T... tail>
+ struct IntegerSequenceHelper<T, head, tail...>
+ {
+
+ // get first entry
+ static constexpr auto get(std::integral_constant<std::size_t, 0>)
+ {
+ return std::integral_constant<T, head>();
+ }
+
+ // call get with first entry cut off and decremented index
+ template<std::size_t index,
+ std::enable_if_t<(index > 0) and (index < sizeof...(tail)+1), int> = 0>
+ static constexpr auto get(std::integral_constant<std::size_t, index>)
+ {
+ return IntegerSequenceHelper<T, tail...>::get(std::integral_constant<std::size_t, index-1>());
+ }
+
+ // use static assertion if index exceeds size
+ template<std::size_t index,
+ std::enable_if_t<(index >= sizeof...(tail)+1), int> = 0>
+ static constexpr auto get(std::integral_constant<std::size_t, index>)
+ {
+        static_assert(index < sizeof...(tail)+1, "index used in IntegerSequenceEntry exceeds the size");
+ }
+ };
+
+ } // end namespace Impl
+
+#endif // DOXYGEN
+
+
+ /**
+ * \brief Get entry of std::integer_sequence
+ *
+ * \param seq An object of type std::integer_sequence<...>
+ * \param i Index
+ *
+ * \return The i-th entry of the integer_sequence encoded as std::integral_constant<std::size_t, entry>.
+ *
+ */
+ template<class T, T... t, std::size_t index>
+ constexpr auto integerSequenceEntry(std::integer_sequence<T, t...> /*seq*/, std::integral_constant<std::size_t, index> i)
+ {
+    static_assert(index < sizeof...(t), "index used in IntegerSequenceEntry exceeds the size");
+ return Impl::IntegerSequenceHelper<T, t...>::get(i);
+ }
+
+
+ /**
+ * \brief Get entry of std::integer_sequence
+ *
+ * Computes the i-th entry of the integer_sequence. The result
+   * is exported as ::value by deriving from std::integral_constant<std::size_t, entry>.
+ */
+ template<class IntegerSequence, std::size_t index>
+ struct IntegerSequenceEntry;
+
+#ifndef DOXYGEN
+
+ template<class T, T... t, std::size_t i>
+ struct IntegerSequenceEntry<std::integer_sequence<T, t...>, i>
+ : public decltype(Impl::IntegerSequenceHelper<T, t...>::get(std::integral_constant<std::size_t, i>()))
+ {};
+
+#endif // DOXYGEN
+
+ /**
+ * \brief Type free of internal references that `T` can be converted to.
+ *
+ * This is the specialization point for `AutonomousValue` and `autoCopy()`.
+ *
+ * If you need to specialize for a proxy type or similar, just specialize
+ * for the plain type. There are already specializations for
+ * reference-qualified and cv-qualified types that will just forward to your
+   * specialization.
+ *
+ * \note For all specializations, the member type `type` should be
+ * constructible from `T`.
+ */
+ template<class T>
+ struct AutonomousValueType { using type = T; };
+
+ //! Specialization to remove lvalue references
+ template<class T>
+ struct AutonomousValueType<T&> : AutonomousValueType<T> {};
+
+ //! Specialization to remove rvalue references
+ template<class T>
+ struct AutonomousValueType<T&&> : AutonomousValueType<T> {};
+
+ //! Specialization to remove const qualifiers
+ template<class T>
+ struct AutonomousValueType<const T> : AutonomousValueType<T> {};
+
+ //! Specialization to remove volatile qualifiers
+ template<class T>
+ struct AutonomousValueType<volatile T> : AutonomousValueType<T> {};
+
+ //! Specialization for the proxies of `vector<bool>`
+ template<>
+ struct AutonomousValueType<std::vector<bool>::reference>
+ {
+ using type = bool;
+ };
+
+ //! Specialization to remove both const and volatile qualifiers
+ template<class T>
+ struct AutonomousValueType<volatile const T> : AutonomousValueType<T> {};
+
+ /**
+ * \brief Type free of internal references that `T` can be converted to.
+ *
+ * Specialize `AutonomousValueType` to add your own mapping. Use
+ * `autoCopy()` to convert an expression of type `T` to
+ * `AutonomousValue<T>`.
+ *
+ * This type alias determines a type that `T` can be converted to, but that
+ * will be free of references to other objects that it does not manage. In
+ * practice it will act like `std::decay_t`, but in addition to removing
+ * references it will also determine the types that proxies stand in for,
+ * and the types that expression templates will evaluate to.
+ *
+ * "Free of references" means that the converted object will always be valid
+ * and does not alias any other objects directly or indirectly. The "other
+ * objects that it does not manage" restriction means that the converted
+ * object may still contain internal references, but they must be to
+ * resources that it manages itself. So, an `std::vector` would be an
+ * autonomous value even though it contains internal references to the
+ * storage for the elements since it manages that storage itself.
+ *
+ * \note For pointers, iterators, and the like the "value" for the purpose
+ * of `AutonomousValue` is considered to be the identity of the
+ * pointed-to object, so that object should not be cloned. But then
+ * you should hopefully never need an autonomous value for those
+ * anyway...
+ */
+ template<class T>
+ using AutonomousValue = typename AutonomousValueType<T>::type;
+
+ /**
+ * \brief Autonomous copy of an expression's value for use in `auto` type
+ * deduction
+ *
+ * This function is an unproxyfier or an expression evaluator or a fancy
+ * cast to ensure an expression can be used in `auto` type deduction. It
+ * ensures two things:
+ *
+ * 1. The return value is a prvalue,
+ * 2. the returned value is self-sufficient, or "autonomous".
+ *
+ * The latter means that there will be no references into other objects
+ * (like containers) which are not guaranteed to be kept alive during the
+ * lifetime of the returned value.
+ *
+ * An example usage would be
+ * ```c++
+ * std::vector<bool> bitvector{24};
+ * auto value = autoCopy(bitvector[23]);
+ * bitvector.resize(42);
+ * // value still valid
+ * ```
+ * Since `vector<bool>` may use proxies, `auto value = bitvector[23];` would
+ * mean that the type of `value` is such a proxy. The proxy keeps internal
+ * references into the vector, and thus can be invalidated by anything that
+ * modifies the vector -- such as a later call to `resize()`. `autoCopy()`
+ * lets you work around that problem by copying the value referenced by the
+ * proxy and converting it to a `bool`.
+ *
+ * Another example would be an automatic differentiation library that lets
+ * you track the operations in a computation, and later ask for derivatives.
+ * Imagine that your operation involves a parameter function, and you want
+ * to use that function both with plain types and with automatic
+ * differentiation types. You might write the parameter function as
+ * follows:
+ * ```c++
+ * template<class NumberType>
+ * auto param(NumberType v)
+ * {
+ * return 2*v;
+ * }
+ * ```
+ * If the automatic differentiation library is Adept, this would lead to
+ * use-after-end-of-life-bugs. The reason is that for efficiency reasons
+   * Adept does not immediately evaluate the expression, but instead it
+ * constructs an expression object that records the kind of expression and
+ * references to the operands. The expression object is only evaluated when
+ * it is assigned to an object of some number type -- which will only happen
+ * after the operands (`v` and the temporary object representing `2`) have
+ * gone out of scope and been destroyed. Basically, Adept was invented
+ * before `auto` and rvalue-references were a thing.
+ *
+ * This can be handled with `autoCopy()`:
+ * ```c++
+ * template<class NumberType>
+ * auto param(NumberType v)
+ * {
+ * return autoCopy(2*v);
+ * }
+ * ```
+ * Of course, `autoCopy()` needs to be taught about the expression
+ * objects of Adept for this to work.
+ *
+ * `autoCopy()` will by default simply return the argument as a prvalue of
+ * the same type with cv-qualifiers removed. This involves one or more
+ * copy/move operation, so it will only work with types that are in fact
+ * copyable. And it will incur one copy if the compiler cannot use a move,
+ * such as when the type of the expression is a `std::array` or a
+ * `FieldMatrix`. (Any second copy that may semantically be necessary will
+ * be elided.)
+ *
+ * To teach `autoCopy()` about a particular proxy type, specialize
+ * `Dune::AutonomousValueType`.
+ *
+ * \note Do not overload `Dune::autoCopy()` directly. It is meant to be
+ * found by unqualified or qualified lookup, not by ADL. There is
+ * little guarantee that your overload will be declared before the
+ * definition of internal Dune functions that use `autoCopy()`. They
+ * would need the lazy binding provided by ADL to find your overload,
+ * but they will probably use unqualified lookup.
+ */
+ template<class T>
+ constexpr AutonomousValue<T> autoCopy(T &&v)
+ {
+ return v;
+ }
+
+ /** @} */
+}
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_TYPEUTILITIES_HH
+#define DUNE_COMMON_TYPEUTILITIES_HH
+
+#include <cstddef>
+#include <type_traits>
+#include <tuple>
+
+
+namespace Dune {
+
+ /**
+ * \file
+ * \brief Utilities for type computations, constraining overloads, ...
+ * \author Carsten Gräser
+ */
+
+
+ namespace Impl
+ {
+
+ template<class This, class... T>
+ struct disableCopyMoveHelper : public std::is_base_of<This, std::tuple_element_t<0, std::tuple<std::decay_t<T>...>>>
+ {};
+
+ template<class This>
+ struct disableCopyMoveHelper<This> : public std::false_type
+ {};
+
+ } // namespace Impl
+
+
+ /**
+ * \brief Helper to disable constructor as copy and move constructor
+ *
+ * \ingroup TypeUtilities
+ *
+ * Helper typedef to remove constructor with forwarding reference from
+ * overload set for copy and move constructor or assignment.
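+   *
+   * A typical usage sketch (MyClass is a placeholder): constrain a
+   * perfect-forwarding constructor so it does not shadow the copy and
+   * move constructors.
+   * \code
+   * class MyClass
+   * {
+   * public:
+   *   template<class... Args, disableCopyMove<MyClass, Args...> = 0>
+   *   MyClass(Args&&... args);
+   * };
+   * \endcode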
+ */
+ template<class This, class... T>
+ using disableCopyMove = std::enable_if_t< not Impl::disableCopyMoveHelper<This, T...>::value, int>;
+
+
+
+ /**
+ * \brief Helper class for tagging priorities.
+ *
+ * \ingroup TypeUtilities
+ *
+ * When using multiple overloads of a function
+ * where some are removed from the overload set
+ * via SFINAE, the remaining overloads may be ambiguous.
+ * A prototypical example is a default overload
+ * that should only be used if the others do not apply.
+ *
+ * By adding additional arguments of type PriorityTag<k>
+ * with increasing priority k to all overloads and calling
+ * the function with PriorityTag<m>, where m is greater than or equal
+ * to the maximal priority used, the overloads become unambiguous.
+ *
+ * In this case the matching overload with highest priority
+ * will be used. This is achieved by the fact that PriorityTag<k>
+ * derives from all types PriorityTag<i> with i less than k.
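+ *
+ * A minimal sketch of the dispatch pattern (all names are illustrative):
+ * ```c++
+ * // preferred overload, participates only if t.size() is well-formed
+ * template<class T>
+ * auto sizeImpl(const T& t, PriorityTag<1>) -> decltype(t.size())
+ * { return t.size(); }
+ *
+ * // fallback overload
+ * template<class T>
+ * std::size_t sizeImpl(const T&, PriorityTag<0>)
+ * { return 1; }
+ *
+ * template<class T>
+ * std::size_t size(const T& t)
+ * { return sizeImpl(t, PriorityTag<42>()); }
+ * ```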
+ *
+ * \tparam priority The priority of this tag.
+ */
+ template<std::size_t priority>
+ struct PriorityTag : public PriorityTag<priority-1>
+ {
+ static constexpr std::size_t value = priority;
+ };
+
+ /**
+ * \brief Helper class for tagging priorities.
+ *
+ * \ingroup TypeUtilities
+ *
+ * PriorityTag<0> does not derive from any
+ * other PriorityTag.
+ */
+ template<>
+ struct PriorityTag<0>
+ {
+ static constexpr std::size_t value = 0;
+ };
+
+
+
+} // namespace Dune
+
+
+
+#endif // DUNE_COMMON_TYPEUTILITIES_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_UNUSED_HH
+#define DUNE_COMMON_UNUSED_HH
+
+/** \file
+ * \brief Definition of the DUNE_UNUSED macro for the case that config.h
+ * is not available
+ */
+
+#ifndef HAS_ATTRIBUTE_UNUSED
+//! A macro for marking variables that the compiler mistakenly flags as unused, which sometimes happens due to templates.
+/**
+ * \ingroup CxxUtilities
+ * \deprecated Use C++17's \code[[maybe_unused]]\endcode instead. This
+ * macro will be removed after Dune 2.8. Be aware that it sometimes
+ * has to be placed at a different position in the code.
+ */
+#define DUNE_UNUSED
+#else
+#ifdef __GNUC__
+# define DUNE_UNUSED _Pragma("GCC warning \"DUNE_UNUSED is deprecated\"") __attribute__((unused))
+#else
+# define DUNE_UNUSED _Pragma("message \"DUNE_UNUSED is deprecated\"") __attribute__((unused))
+#endif
+#endif
+
+/**
+ * A macro for marking intentionally unused function parameters.
+ *
+ * If possible use C++17's \code[[maybe_unused]]\endcode instead.
+ * Due to a bug in GCC prior to version 9.3, it cannot be used on the
+ * first argument of a constructor (GCC bug 81429).
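+ *
+ * A typical use looks like this (illustrative):
+ * \code
+ * void foo(int i)
+ * {
+ *   DUNE_UNUSED_PARAMETER(i);
+ * }
+ * \endcode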
+ *
+ * \ingroup CxxUtilities
+ */
+#define DUNE_UNUSED_PARAMETER(parm) static_cast<void>(parm)
+#endif
--- /dev/null
+#ifndef DUNE_COMMON_VC_HH
+#define DUNE_COMMON_VC_HH
+
+/**
+ \file
+
+ \brief Compatibility header for including <Vc/Vc>
+
+ Certain versions (1.3.2) of Vc (https://github.com/VcDevel/Vc) have a
+ problem with certain compiler versions (g++ 7.2.0) in C++17 mode; see issue #88.
+ */
+
+#if HAVE_VC
+
+// include Vc's macro definitions
+#include <Vc/global.h>
+
+// undefine the macro that signals C++17 support, if set
+#ifdef Vc_CXX17
+#undef Vc_CXX17
+#endif
+
+// include the rest of Vc
+#include <Vc/Vc>
+
+#endif // HAVE_VC
+
+#endif // DUNE_COMMON_VC_HH
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_VERSION_HH
+#define DUNE_VERSION_HH
+
+/** \file
+ * \brief Various macros to work with %Dune module version numbers
+ */
+
+/** \brief Constructs the preprocessor name used in config.h to hold version numbers
+ *
+ * For the DUNE core modules you need to use the following module names:
+ * - DUNE_COMMON for dune-common
+ * - DUNE_GRID for dune-grid
+ * - DUNE_GEOMETRY for dune-geometry
+ * - DUNE_ISTL for dune-istl
+ * - DUNE_LOCALFUNCTIONS for dune-localfunctions
+ *
+ * For external DUNE modules, you should capitalize the name and
+ * replace '-' by underscores. For example for the module foo-bar you
+ * need to use FOO_BAR as module name in the context of this macro.
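+ *
+ * For example, `DUNE_VERSION_JOIN(DUNE_COMMON, MAJOR)` expands to
+ * `DUNE_COMMON_VERSION_MAJOR`.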
+ *
+ * \param module The name of the Dune module
+ * \param type The version number type, one of MAJOR, MINOR, or REVISION
+ */
+#define DUNE_VERSION_JOIN(module,type) module ## _VERSION_ ## type
+
+/**
+ * \brief True if 'module' has the version major.minor
+ *
+ * For the DUNE core modules you need to use the following module names:
+ * - DUNE_COMMON for dune-common
+ * - DUNE_GRID for dune-grid
+ * - DUNE_GEOMETRY for dune-geometry
+ * - DUNE_ISTL for dune-istl
+ * - DUNE_LOCALFUNCTIONS for dune-localfunctions
+ *
+ * For external DUNE modules, you should capitalize the name and
+ * replace '-' by underscores. For example for the module foo-bar you
+ * need to use FOO_BAR as module name in the context of this macro.
+ */
+#define DUNE_VERSION_EQUAL(module,major,minor) \
+ ((DUNE_VERSION_JOIN(module,MAJOR) == major) && \
+ (DUNE_VERSION_JOIN(module,MINOR) == minor))
+
+/**
+ * \brief True if 'module' has the version major.minor.revision
+ *
+ * For the DUNE core modules you need to use the following module names:
+ * - DUNE_COMMON for dune-common
+ * - DUNE_GRID for dune-grid
+ * - DUNE_GEOMETRY for dune-geometry
+ * - DUNE_ISTL for dune-istl
+ * - DUNE_LOCALFUNCTIONS for dune-localfunctions
+ *
+ * For external DUNE modules, you should capitalize the name and
+ * replace '-' by underscores. For example for the module foo-bar you
+ * need to use FOO_BAR as module name in the context of this macro.
+ */
+#define DUNE_VERSION_EQUAL_REV(module,major,minor,revision) \
+ ( DUNE_VERSION_EQUAL(module,major,minor) && \
+ (DUNE_VERSION_JOIN(module,REVISION) == revision))
+
+/**
+ * \brief True if 'module' has the version major.minor or greater
+ *
+ * For the DUNE core modules you need to use the following module names:
+ * - DUNE_COMMON for dune-common
+ * - DUNE_GRID for dune-grid
+ * - DUNE_GEOMETRY for dune-geometry
+ * - DUNE_ISTL for dune-istl
+ * - DUNE_LOCALFUNCTIONS for dune-localfunctions
+ *
+ * For external DUNE modules, you should capitalize the name and
+ * replace '-' by underscores. For example for the module foo-bar you
+ * need to use FOO_BAR as module name in the context of this macro.
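+ *
+ * A typical use (illustrative):
+ * \code
+ * #if DUNE_VERSION_GTE(DUNE_ISTL, 2, 8)
+ *   // code that requires dune-istl 2.8 or newer
+ * #endif
+ * \endcode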
+ */
+#define DUNE_VERSION_GTE(module,major,minor) \
+ ((DUNE_VERSION_JOIN(module,MAJOR) > major) \
+ || ((DUNE_VERSION_JOIN(module,MAJOR) == major) && (DUNE_VERSION_JOIN(module,MINOR) >= minor)))
+
+/**
+ * \brief True if 'module' has a version less than major.minor
+ *
+ * For the DUNE core modules you need to use the following module names:
+ * - DUNE_COMMON for dune-common
+ * - DUNE_GRID for dune-grid
+ * - DUNE_GEOMETRY for dune-geometry
+ * - DUNE_ISTL for dune-istl
+ * - DUNE_LOCALFUNCTIONS for dune-localfunctions
+ *
+ * For external DUNE modules, you should capitalize the name and
+ * replace '-' by underscores. For example for the module foo-bar you
+ * need to use FOO_BAR as module name in the context of this macro.
+ */
+#define DUNE_VERSION_LT(module,major,minor) \
+ ! DUNE_VERSION_GTE(module,major,minor)
+
+/**
+ * \brief True if 'module' has the version major.minor or newer
+ * \note Deprecated, use DUNE_VERSION_GTE instead.
+ *
+ * For the DUNE core modules you need to use the following module names:
+ * - DUNE_COMMON for dune-common
+ * - DUNE_GRID for dune-grid
+ * - DUNE_GEOMETRY for dune-geometry
+ * - DUNE_ISTL for dune-istl
+ * - DUNE_LOCALFUNCTIONS for dune-localfunctions
+ *
+ * For external DUNE modules, you should capitalize the name and
+ * replace '-' by underscores. For example for the module foo-bar you
+ * need to use FOO_BAR as module name in the context of this macro.
+ */
+#define DUNE_VERSION_NEWER(module,major,minor) \
+ DUNE_VERSION_GTE(module,major,minor)
+
+/**
+ * \brief True if 'module' has a version greater than major.minor
+ *
+ * For the DUNE core modules you need to use the following module names:
+ * - DUNE_COMMON for dune-common
+ * - DUNE_GRID for dune-grid
+ * - DUNE_GEOMETRY for dune-geometry
+ * - DUNE_ISTL for dune-istl
+ * - DUNE_LOCALFUNCTIONS for dune-localfunctions
+ *
+ * For external DUNE modules, you should capitalize the name and
+ * replace '-' by underscores. For example for the module foo-bar you
+ * need to use FOO_BAR as module name in the context of this macro.
+ */
+#define DUNE_VERSION_GT(module,major,minor) \
+ ((DUNE_VERSION_JOIN(module,MAJOR) > major) \
+ || ((DUNE_VERSION_JOIN(module,MAJOR) == major) && (DUNE_VERSION_JOIN(module,MINOR) > minor)))
+
+/**
+ * \brief True if 'module' has a version less than or equal to major.minor
+ *
+ * For the DUNE core modules you need to use the following module names:
+ * - DUNE_COMMON for dune-common
+ * - DUNE_GRID for dune-grid
+ * - DUNE_GEOMETRY for dune-geometry
+ * - DUNE_ISTL for dune-istl
+ * - DUNE_LOCALFUNCTIONS for dune-localfunctions
+ *
+ * For external DUNE modules, you should capitalize the name and
+ * replace '-' by underscores. For example for the module foo-bar you
+ * need to use FOO_BAR as module name in the context of this macro.
+ */
+#define DUNE_VERSION_LTE(module,major,minor) \
+ ! DUNE_VERSION_GT(module,major,minor)
+
+/**
+ * \brief True if 'module' has the version major.minor.revision or greater
+ *
+ * For the DUNE core modules you need to use the following module names:
+ * - DUNE_COMMON for dune-common
+ * - DUNE_GRID for dune-grid
+ * - DUNE_GEOMETRY for dune-geometry
+ * - DUNE_ISTL for dune-istl
+ * - DUNE_LOCALFUNCTIONS for dune-localfunctions
+ *
+ * For external DUNE modules, you should capitalize the name and
+ * replace '-' by underscores. For example for the module foo-bar you
+ * need to use FOO_BAR as module name in the context of this macro.
+ */
+#define DUNE_VERSION_GTE_REV(module,major,minor,revision) \
+ ((DUNE_VERSION_JOIN(module,MAJOR) > major) \
+ || ((DUNE_VERSION_JOIN(module,MAJOR) == major) && (DUNE_VERSION_JOIN(module,MINOR) > minor)) \
+ || ((DUNE_VERSION_JOIN(module,MAJOR) == major) && (DUNE_VERSION_JOIN(module,MINOR) == minor) \
+ && (DUNE_VERSION_JOIN(module,REVISION) >= revision)))
+
+/**
+ * \brief True if 'module' has a version lower than major.minor.revision
+ *
+ * For the DUNE core modules you need to use the following module names:
+ * - DUNE_COMMON for dune-common
+ * - DUNE_GRID for dune-grid
+ * - DUNE_GEOMETRY for dune-geometry
+ * - DUNE_ISTL for dune-istl
+ * - DUNE_LOCALFUNCTIONS for dune-localfunctions
+ *
+ * For external DUNE modules, you should capitalize the name and
+ * replace '-' by underscores. For example for the module foo-bar you
+ * need to use FOO_BAR as module name in the context of this macro.
+ */
+#define DUNE_VERSION_LT_REV(module,major,minor,revision) \
+ ! DUNE_VERSION_GTE_REV(module,major,minor,revision)
+
+/**
+ * \brief True if 'module' has the version major.minor.revision or newer
+ * \note Deprecated, use DUNE_VERSION_GTE_REV instead.
+ *
+ * For the DUNE core modules you need to use the following module names:
+ * - DUNE_COMMON for dune-common
+ * - DUNE_GRID for dune-grid
+ * - DUNE_GEOMETRY for dune-geometry
+ * - DUNE_ISTL for dune-istl
+ * - DUNE_LOCALFUNCTIONS for dune-localfunctions
+ *
+ * For external DUNE modules, you should capitalize the name and
+ * replace '-' by underscores. For example for the module foo-bar you
+ * need to use FOO_BAR as module name in the context of this macro.
+ */
+#define DUNE_VERSION_NEWER_REV(module,major,minor,revision) \
+ DUNE_VERSION_GTE_REV(module,major,minor,revision)
+
+/**
+ * \brief True if 'module' has a greater version than major.minor.revision
+ *
+ * For the DUNE core modules you need to use the following module names:
+ * - DUNE_COMMON for dune-common
+ * - DUNE_GRID for dune-grid
+ * - DUNE_GEOMETRY for dune-geometry
+ * - DUNE_ISTL for dune-istl
+ * - DUNE_LOCALFUNCTIONS for dune-localfunctions
+ *
+ * For external DUNE modules, you should capitalize the name and
+ * replace '-' by underscores. For example for the module foo-bar you
+ * need to use FOO_BAR as module name in the context of this macro.
+ */
+#define DUNE_VERSION_GT_REV(module,major,minor,revision) \
+ ((DUNE_VERSION_JOIN(module,MAJOR) > major) \
+ || ((DUNE_VERSION_JOIN(module,MAJOR) == major) && (DUNE_VERSION_JOIN(module,MINOR) > minor)) \
+ || ((DUNE_VERSION_JOIN(module,MAJOR) == major) && (DUNE_VERSION_JOIN(module,MINOR) == minor) \
+ && (DUNE_VERSION_JOIN(module,REVISION) > revision)))
+
+/**
+ * \brief True if 'module' has a version lower or equal to major.minor.revision
+ *
+ * For the DUNE core modules you need to use the following module names:
+ * - DUNE_COMMON for dune-common
+ * - DUNE_GRID for dune-grid
+ * - DUNE_GEOMETRY for dune-geometry
+ * - DUNE_ISTL for dune-istl
+ * - DUNE_LOCALFUNCTIONS for dune-localfunctions
+ *
+ * For external DUNE modules, you should capitalize the name and
+ * replace '-' by underscores. For example for the module foo-bar you
+ * need to use FOO_BAR as module name in the context of this macro.
+ */
+#define DUNE_VERSION_LTE_REV(module,major,minor,revision) \
+ ! DUNE_VERSION_GT_REV(module,major,minor,revision)
+
+/**
+ * \brief Compute a unique uint id from the major, minor, and revision numbers
+ *
+ * Note that, unlike the other macros in this file, this macro takes the
+ * three version numbers directly instead of a module name.
+ */
+#define DUNE_VERSION_ID(major,minor,revision) \
+ ((unsigned int)((major << 24) + (minor << 16) + revision))
+
+/**
+ * \brief Compute a unique uint id for the given module
+ *
+ * For the DUNE core modules you need to use the following module names:
+ * - DUNE_COMMON for dune-common
+ * - DUNE_GRID for dune-grid
+ * - DUNE_GEOMETRY for dune-geometry
+ * - DUNE_ISTL for dune-istl
+ * - DUNE_LOCALFUNCTIONS for dune-localfunctions
+ *
+ * For external DUNE modules, you should capitalize the name and
+ * replace '-' by underscores. For example for the module foo-bar you
+ * need to use FOO_BAR as module name in the context of this macro.
+ */
+#define DUNE_MODULE_VERSION_ID(module) \
+ DUNE_VERSION_ID( DUNE_VERSION_JOIN(module,MAJOR), DUNE_VERSION_JOIN(module,MINOR), DUNE_VERSION_JOIN(module,REVISION) )
+
+#endif
--- /dev/null
+// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_COMMON_VISIBILITY_HH
+#define DUNE_COMMON_VISIBILITY_HH
+
+/** \file
+ * \brief Definition of macros controlling symbol visibility at the ABI level.
+ */
+
+#ifdef DOXYGEN
+
+//! Export a symbol as part of the public ABI.
+/**
+ * Mark a class, function or static variable as visible outside the current DSO.
+ * For now, this is mostly important for templated global variables and functions
+ * that contain static variables.
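+ *
+ * A minimal sketch of the intended use:
+ * \code
+ * template<class T>
+ * DUNE_EXPORT T& instance()
+ * {
+ *   static T singleton;
+ *   return singleton;
+ * }
+ * \endcode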
+ */
+#define DUNE_EXPORT implementation_defined
+
+//! Mark a symbol as being for internal use within the current DSO only.
+/**
+ * Mark a class, function or static variable as inaccessible from outside the current DSO.
+ * Doing so will decrease the size of the symbol table, but you have to be sure that the
+ * symbol will never have to be accessed from another library or the main executable!
+ */
+#define DUNE_PRIVATE implementation_defined
+
+#else // DOXYGEN
+
+#if __GNUC__ >= 4
+// GCC defines __GNUC__ to its major version (>= 4 for all supported releases),
+// Clang defines it to 4; both support the visibility attribute.
+#define DUNE_EXPORT __attribute__((visibility("default")))
+#define DUNE_PRIVATE __attribute__((visibility("hidden")))
+#else
+// We don't know the active compiler, so just turn the visibility macros into no-ops.
+#define DUNE_EXPORT
+#define DUNE_PRIVATE
+#endif
+
+#endif // DOXYGEN
+
+#endif // DUNE_COMMON_VISIBILITY_HH
--- /dev/null
+add_subdirectory(common)
+add_subdirectory(test)
+add_subdirectory(pybind11)
--- /dev/null
+set(HEADERS
+ densematrix.hh
+ densevector.hh
+ dimrange.hh
+ dynmatrix.hh
+ dynvector.hh
+ fmatrix.hh
+ fvecmatregistry.hh
+ fvector.hh
+ getdimension.hh
+ logger.hh
+ mpihelper.hh
+ numpycommdatahandle.hh
+ numpyvector.hh
+ pythonvector.hh
+ string.hh
+ typeregistry.hh
+ vector.hh
+)
+
+install(FILES ${HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dune/python/common)
--- /dev/null
+#ifndef DUNE_PYTHON_COMMON_DENSEMATRIX_HH
+#define DUNE_PYTHON_COMMON_DENSEMATRIX_HH
+
+#include <string>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include <dune/python/pybind11/extensions.h>
+#include <dune/python/pybind11/operators.h>
+#include <dune/python/pybind11/pybind11.h>
+
+namespace Dune
+{
+
+ namespace Python
+ {
+
+ // registerDenseMatrix
+ // -------------------
+
+ template< class Matrix >
+ void registerDenseMatrix ( pybind11::class_< Matrix > cls )
+ {
+ typedef typename Matrix::field_type field_type;
+ typedef typename Matrix::row_type row_type;
+ typedef typename Matrix::row_reference row_reference;
+
+ cls.def( "__getitem__", [] ( Matrix &self, std::size_t i ) -> row_reference {
+ if( i < self.mat_rows() )
+ return self[ i ];
+ else
+ throw pybind11::index_error();
+ }, (std::is_reference< row_reference >::value ? pybind11::return_value_policy::reference : pybind11::return_value_policy::move), pybind11::keep_alive< 0, 1 >() );
+
+ cls.def( "__setitem__", [] ( Matrix &self, std::size_t i, pybind11::object l ) {
+ if( i < self.mat_rows() )
+ {
+ row_type v = l.cast< row_type >();
+ std::size_t size = std::min( self.mat_cols(), v.size() );
+
+ for( std::size_t j = 0; j < size; ++j )
+ self[ i ][ j ] = v[ j ];
+ }
+ else
+ throw pybind11::index_error();
+ } );
+
+ cls.def( "__len__", [] ( const Matrix &self ) -> std::size_t { return self.size(); } );
+
+ cls.def( "invert", [] ( Matrix &self ) { self.invert(); } );
+
+ cls.def( pybind11::self += pybind11::self );
+ cls.def( pybind11::self -= pybind11::self );
+ cls.def( pybind11::self *= field_type() );
+ cls.def( pybind11::self /= field_type() );
+
+ cls.def( pybind11::self == pybind11::self );
+ cls.def( pybind11::self != pybind11::self );
+
+ cls.def_property_readonly( "frobenius_norm", [] ( const Matrix &self ) { return self.frobenius_norm(); } );
+ cls.def_property_readonly( "frobenius_norm2", [] ( const Matrix &self ) { return self.frobenius_norm2(); } );
+ cls.def_property_readonly( "infinity_norm", [] ( const Matrix &self ) { return self.infinity_norm(); } );
+ cls.def_property_readonly( "infinity_norm_real", [] ( const Matrix &self ) { return self.infinity_norm_real(); } );
+
+ cls.def_property_readonly( "rows", [] ( const Matrix &self ) { return self.mat_rows(); } );
+ cls.def_property_readonly( "cols", [] ( const Matrix &self ) { return self.mat_cols(); } );
+ }
+
+ } // namespace Python
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_PYTHON_COMMON_DENSEMATRIX_HH
--- /dev/null
+// -*- tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_PYTHON_COMMON_DENSEVECTOR_HH
+#define DUNE_PYTHON_COMMON_DENSEVECTOR_HH
+
+#include <string>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include <dune/python/common/vector.hh>
+#include <dune/python/pybind11/pybind11.h>
+#include <dune/python/pybind11/operators.h>
+
+namespace Dune
+{
+
+ namespace Python
+ {
+
+ namespace detail
+ {
+
+ // registerScalarCopyingDenseVectorMethods
+ // ---------------------------------------
+
+ template< class T, class... options >
+ inline static std::enable_if_t< std::is_copy_constructible< T >::value && (T::dimension == 1) >
+ registerScalarCopyingDenseVectorMethods ( pybind11::class_< T, options... > cls, PriorityTag< 2 > )
+ {
+ using ValueType = typename T::value_type;
+
+ cls.def( "__add__", [] ( const T &self, int a ) { T *copy = new T( self ); (*copy)[ 0 ] += ValueType( a ); return copy; } );
+ cls.def( "__add__", [] ( const T &self, const ValueType &a ) { T *copy = new T( self ); (*copy)[ 0 ] += a; return copy; } );
+ cls.def( "__sub__", [] ( const T &self, int a ) { T *copy = new T( self ); (*copy)[ 0 ] -= ValueType( a ); return copy; } );
+ cls.def( "__sub__", [] ( const T &self, const ValueType &a ) { T *copy = new T( self ); (*copy)[ 0 ] -= a; return copy; } );
+
+ cls.def( "__radd__", [] ( const T &self, int a ) { T *copy = new T( self ); (*copy)[ 0 ] = ValueType( a ) + (*copy)[ 0 ]; return copy; } );
+ cls.def( "__radd__", [] ( const T &self, const ValueType &a ) { T *copy = new T( self ); (*copy)[ 0 ] = a + (*copy)[ 0 ]; return copy; } );
+ cls.def( "__rsub__", [] ( const T &self, int a ) { T *copy = new T( self ); (*copy)[ 0 ] = ValueType( a ) - (*copy)[ 0 ]; return copy; } );
+ cls.def( "__rsub__", [] ( const T &self, const ValueType &a ) { T *copy = new T( self ); (*copy)[ 0 ] = a - (*copy)[ 0 ]; return copy; } );
+ }
+
+ template< class T, class... options >
+ inline static std::enable_if_t< std::is_copy_constructible< T >::value >
+ registerScalarCopyingDenseVectorMethods ( pybind11::class_< T, options... > cls, PriorityTag< 1 > )
+ {
+ using ValueType = typename T::value_type;
+
+ cls.def( "__add__", [] ( pybind11::object self, int a ) {
+ if( a != 0 )
+ throw pybind11::value_error( "Cannot add " + std::to_string( a ) + " to multidimensional dense vector." );
+ return self;
+ } );
+ cls.def( "__sub__", [] ( pybind11::object self, int a ) {
+ if( a != 0 )
+ throw pybind11::value_error( "Cannot subtract " + std::to_string( a ) + " from multidimensional dense vector." );
+ return self;
+ } );
+
+ cls.def( "__radd__", [] ( pybind11::object self, int a ) {
+ if( a != 0 )
+ throw pybind11::value_error( "Cannot add multidimensional dense vector to " + std::to_string( a ) + "." );
+ return self;
+ } );
+ cls.def( "__rsub__", [] ( const T &self, int a ) {
+ if( a != 0 )
+ throw pybind11::value_error( "Cannot subtract multidimensional dense vector from " + std::to_string( a ) + "." );
+ T *copy = new T( self ); *copy *= ValueType( -1 ); return copy;
+ } );
+ }
+
+ template< class T, class... options >
+ inline static void registerScalarCopyingDenseVectorMethods ( pybind11::class_< T, options... > cls, PriorityTag< 0 > )
+ {}
+
+ template< class T, class... options >
+ inline static void registerScalarCopyingDenseVectorMethods ( pybind11::class_< T, options... > cls )
+ {
+ registerScalarCopyingDenseVectorMethods ( cls, PriorityTag< 42 >() );
+ }
+
+
+
+
+ // registerCopyingDenseVectorMethods
+ // ---------------------------------
+
+ template< class T, class... options >
+ inline static std::enable_if_t< std::is_copy_constructible< T >::value >
+ registerCopyingDenseVectorMethods ( pybind11::class_< T, options... > cls, PriorityTag< 1 > )
+ {
+ using ValueType = typename T::value_type;
+
+ using pybind11::operator""_a;
+
+ cls.def( "__pos__", [] ( pybind11::object self ) { return self; } );
+ cls.def( "__neg__", [] ( T &self ) { T *copy = new T( self ); *copy *= ValueType( -1 ); return copy; } );
+
+ cls.def( pybind11::self + pybind11::self );
+ cls.def( pybind11::self - pybind11::self );
+
+ cls.def( "__add__", [] ( T &self, pybind11::list x ) { return self + x.cast< T >(); }, "x"_a );
+ cls.def( "__sub__", [] ( T &self, pybind11::list x ) { return self - x.cast< T >(); }, "x"_a );
+
+ cls.def( "__radd__", [] ( T &self, pybind11::list x ) { return x.cast< T >() + self; }, "x"_a );
+ cls.def( "__rsub__", [] ( T &self, pybind11::list x ) { return x.cast< T >() - self; }, "x"_a );
+
+ cls.def( "__mul__", [] ( const T &self, ValueType x ) { T *copy = new T( self ); *copy *= x; return copy; }, "x"_a );
+ cls.def( "__div__", [] ( const T &self, ValueType x ) { T *copy = new T( self ); *copy /= x; return copy; }, "x"_a );
+ cls.def( "__truediv__", [] ( const T &self, ValueType x ) { T *copy = new T( self ); *copy /= x; return copy; }, "x"_a );
+
+ cls.def( "__rmul__", [] ( const T &self, ValueType x ) { T *copy = new T( self ); *copy *= x; return copy; }, "x"_a );
+ }
+
+ template< class T, class... options >
+ inline static void registerCopyingDenseVectorMethods ( pybind11::class_< T, options... > cls, PriorityTag< 0 > )
+ {}
+
+ template< class T, class... options >
+ inline static void registerCopyingDenseVectorMethods ( pybind11::class_< T, options... > cls )
+ {
+ registerCopyingDenseVectorMethods ( cls, PriorityTag< 42 >() );
+ }
+
+ } // namespace detail
+
+
+
+ // registerDenseVector
+ // -------------------
+
+ template< class T, class... options >
+ inline static void registerDenseVector ( pybind11::class_< T, options... > cls )
+ {
+ using ValueType = typename T::value_type;
+
+ using pybind11::operator""_a;
+
+ cls.def( "assign", [] ( T &self, const T &x ) { self = x; }, "x"_a );
+
+ cls.def( "__getitem__", [] ( const T &self, std::size_t i ) -> ValueType {
+ if( i < self.size() )
+ return self[ i ];
+ else
+ throw pybind11::index_error();
+ }, "i"_a );
+
+ cls.def( "__setitem__", [] ( T &self, std::size_t i, ValueType x ) {
+ if( i < self.size() )
+ self[ i ] = x;
+ else
+ throw pybind11::index_error();
+ }, "i"_a, "x"_a );
+
+ cls.def( "__len__", [] ( const T &self ) -> std::size_t { return self.size(); } );
+
+ cls.def( pybind11::self += pybind11::self );
+ cls.def( pybind11::self -= pybind11::self );
+
+ cls.def( pybind11::self == pybind11::self );
+ cls.def( pybind11::self != pybind11::self );
+
+ cls.def( pybind11::self += ValueType() );
+ cls.def( pybind11::self -= ValueType() );
+ cls.def( pybind11::self *= ValueType() );
+ cls.def( pybind11::self /= ValueType() );
+
+ detail::registerOneTensorInterface( cls );
+ detail::registerCopyingDenseVectorMethods( cls );
+ detail::registerScalarCopyingDenseVectorMethods( cls );
+
+ pybind11::implicitly_convertible< pybind11::list, T >();
+ }
+
+ } // namespace Python
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_PYTHON_COMMON_DENSEVECTOR_HH
--- /dev/null
+#ifndef DUNE_PYTHON_COMMON_DIMRANGE_HH
+#define DUNE_PYTHON_COMMON_DIMRANGE_HH
+
+#include <cstddef>
+
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#if HAVE_DUNE_TYPETREE
+#include <dune/typetree/compositenode.hh>
+#include <dune/typetree/powernode.hh>
+#endif // #if HAVE_DUNE_TYPETREE
+
+#include <dune/python/common/fmatrix.hh>
+#include <dune/python/common/fvector.hh>
+
+namespace Dune
+{
+
+ namespace Python
+ {
+
+ namespace detail
+ {
+
+ template< class T >
+ inline static constexpr T sum () noexcept
+ {
+ return static_cast< T >( 0 );
+ }
+
+ template< class T >
+ inline static constexpr T sum ( T a ) noexcept
+ {
+ return a;
+ }
+
+ template< class T, class... U >
+ inline static constexpr T sum ( T a, T b, U... c ) noexcept
+ {
+ return sum( a+b, c... );
+ }
+
+
+ template< class T, class Enable = void >
+ struct DimRange;
+
+ template< class T >
+ struct DimRange< T, std::enable_if_t< std::is_arithmetic< T >::value > >
+ : public std::integral_constant< std::size_t, 1 >
+ {};
+
+ template< class... T >
+ struct DimRange< std::tuple< T... >, void >
+ : public std::integral_constant< std::size_t, sum< std::size_t >( DimRange< T >::value... ) >
+ {};
+
+ template< class K, int n >
+ struct DimRange< FieldVector< K, n >, void >
+ : public std::integral_constant< std::size_t, n >
+ {};
+
+ template< class K, int m, int n >
+ struct DimRange< FieldMatrix< K, m, n >, void >
+ : public std::integral_constant< std::size_t, m*n >
+ {};
+
+#if HAVE_DUNE_TYPETREE
+ template< class T >
+ struct DimRange< T, std::enable_if_t< std::is_same< typename T::NodeTag, Dune::TypeTree::CompositeNodeTag >::value > >
+ : public DimRange< typename T::ChildTypes >
+ {};
+
+ template< class T >
+ struct DimRange< T, std::enable_if_t< std::is_same< typename T::NodeTag, Dune::TypeTree::PowerNodeTag >::value > >
+ : public std::integral_constant< std::size_t, sum< int >( T::CHILDREN * DimRange< typename T::ChildType >::value ) >
+ {};
+#endif // #if HAVE_DUNE_TYPETREE
+
+ template< class T >
+ struct DimRange< T, std::enable_if_t< std::is_class< typename T::FiniteElement >::value > >
+ : public DimRange< std::decay_t< decltype( std::declval< typename T::FiniteElement >().localBasis() ) > >
+ {};
+
+ template< class T >
+ struct DimRange< T, std::enable_if_t< std::is_same< std::size_t, decltype( static_cast< std::size_t >( T::Traits::dimRange ) ) >::value > >
+ : public std::integral_constant< std::size_t, static_cast< std::size_t >( T::Traits::dimRange ) >
+ {};
+
+ } // namespace detail
+
+
+
+ // DimRange
+ // --------
+
+ template< class T >
+ using DimRange = detail::DimRange< T >;
+
+ } // namespace Python
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_PYTHON_COMMON_DIMRANGE_HH
--- /dev/null
+// -*- tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_PYTHON_COMMON_DYNMATRIX_HH
+#define DUNE_PYTHON_COMMON_DYNMATRIX_HH
+
+#include <string>
+#include <tuple>
+#include <utility>
+
+#include <dune/common/dynmatrix.hh>
+
+#include <dune/python/common/typeregistry.hh>
+#include <dune/python/common/densematrix.hh>
+#include <dune/python/pybind11/pybind11.h>
+#include <dune/python/pybind11/operators.h>
+
+namespace Dune
+{
+
+ namespace Python
+ {
+
+ template< class K >
+ static void registerDynamicMatrix ( pybind11::handle scope )
+ {
+ typedef Dune::DynamicMatrix< K > DM;
+
+ auto cls = insertClass< DM >( scope, "DynamicMatrix",
+ GenerateTypeName("Dune::DynamicMatrix",MetaType<K>()),
+ IncludeFiles{"dune/common/dynmatrix.hh"} ).first;
+
+ cls.def( pybind11::init( [] () { return new DM(); } ) );
+
+ cls.def( pybind11::init( [] ( pybind11::list rows ) {
+ std::size_t numRows = rows.size();
+ std::size_t numCols = (numRows > 0 ? rows[ 0 ].cast< pybind11::list >().size() : 0);
+
+ DM *self = new DM( numRows, numCols, K( 0 ) );
+ for( std::size_t i = 0; i < numRows; ++i )
+ {
+ pybind11::list row = rows[ i ].cast< pybind11::list >();
+ std::size_t numCol = row.size();
+ assert(numCols >= numCol); // every row must be no longer than the first row
+ for( std::size_t j = 0; j < numCol; ++j )
+ (*self)[ i ][ j ] = row[ j ].template cast< K >();
+ }
+ return self;
+ } ) );
+
+ cls.def("__repr__",
+ [] (const DM& m) {
+ std::string repr = "Dune::DynamicMatrix:\n(";
+
+ for(unsigned int r = 0; r < m.rows(); r++)
+ {
+ repr += "(";
+ for (unsigned int c = 0; c < m.cols(); c++)
+ repr += (c > 0 ? ", " : "") + std::to_string(m[r][c]);
+ repr += std::string(")") + (r < m.rows() - 1 ? "\n" : "");
+ }
+
+ repr += ")";
+
+ return repr;
+ });
+
+ registerDenseMatrix<DM>(cls);
+ }
+
+ } // namespace Python
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_PYTHON_COMMON_DYNMATRIX_HH
--- /dev/null
+// -*- tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+
+#ifndef DUNE_PYTHON_COMMON_DYNVECTOR_HH
+#define DUNE_PYTHON_COMMON_DYNVECTOR_HH
+
+#include <string>
+#include <tuple>
+#include <utility>
+
+#include <dune/common/dynvector.hh>
+
+#include <dune/python/common/typeregistry.hh>
+#include <dune/python/common/densevector.hh>
+#include <dune/python/pybind11/pybind11.h>
+#include <dune/python/pybind11/operators.h>
+
+namespace Dune
+{
+
+ namespace Python
+ {
+
+ template< class K >
+ void registerDynamicVector ( pybind11::handle scope )
+ {
+ using pybind11::operator""_a;
+
+ typedef Dune::DynamicVector< K > DV;
+
+ auto cls = insertClass< DV >( scope, "DynamicVector",
+ GenerateTypeName("Dune::DynamicVector",MetaType<K>()),
+ IncludeFiles{"dune/common/dynvector.hh"} ).first;
+
+ cls.def( pybind11::init( [] () { return new DV(); } ) );
+
+ cls.def( pybind11::init( [] ( pybind11::list x ) {
+ std::size_t size = x.size();
+ DV *self = new DV( size, K( 0 ) );
+ for( std::size_t i = 0; i < size; ++i )
+ (*self)[ i ] = x[ i ].template cast< K >();
+ return self;
+ } ), "x"_a );
+
+ cls.def("__repr__",
+ [] (const DV &v) {
+ std::string repr = "Dune::DynamicVector: (";
+
+ for (std::size_t i = 0; i < v.size(); ++i)
+ repr += (i > 0 ? ", " : "") + std::to_string(v[i]);
+
+ repr += ")";
+
+ return repr;
+ });
+
+ registerDenseVector<DV>(cls);
+ }
+
+ } // namespace Python
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_PYTHON_COMMON_DYNVECTOR_HH
--- /dev/null
+#ifndef DUNE_PYTHON_COMMON_FMATRIX_HH
+#define DUNE_PYTHON_COMMON_FMATRIX_HH
+
+#include <cstddef>
+
+#include <algorithm>
+#include <string>
+#include <tuple>
+#include <utility>
+
+#include <dune/common/fmatrix.hh>
+
+#include <dune/python/common/typeregistry.hh>
+#include <dune/python/common/fvector.hh>
+#include <dune/python/common/densematrix.hh>
+#include <dune/python/common/string.hh>
+#include <dune/python/pybind11/pybind11.h>
+#include <dune/python/pybind11/operators.h>
+
+namespace Dune
+{
+
+ namespace Python
+ {
+
+ // registerFieldMatrix
+ // -------------------
+
+ template< class K, int m, int n, class ...options >
+ void registerFieldMatrix ( pybind11::handle scope, pybind11::class_<Dune::FieldMatrix<K,m,n>, options...> cls )
+ {
+ typedef Dune::FieldMatrix< K, m, n > FM;
+ using pybind11::operator""_a;
+
+ if( (m == 1) && (n == 1) )
+ {
+ cls.def( pybind11::init( [] ( int a ) { return new FM( K( a ) ); } ) );
+ cls.def( pybind11::init( [] ( K a ) { return new FM( a ); } ) );
+ cls.def( "__float__", [] ( const FM &self ) { return self[ 0 ][ 0 ]; } );
+ pybind11::implicitly_convertible< int, FM >();
+ pybind11::implicitly_convertible< K, FM >();
+ }
+
+ cls.def( pybind11::init( [] () { return new FM( K( 0 ) ); } ) );
+
+ cls.def( pybind11::init( [] ( pybind11::list rows ) {
+ FM *self = new FM( K( 0 ) );
+
+ const std::size_t numRows = std::min( static_cast< std::size_t >( m ), rows.size() );
+ for( std::size_t i = 0; i < numRows; ++i )
+ (*self)[ i ] = pybind11::cast< Dune::FieldVector< K, n > >( rows[ i ] );
+ return self;
+ } ) );
+
+ pybind11::implicitly_convertible< pybind11::list, FM >();
+
+ cls.def( "__str__", [] ( const FM &self ) {
+ std::string s = "(";
+ for( int i = 0; i < m; ++i )
+ {
+ s += (i > 0 ? "\n(" : "(");
+ for( int j = 0; j < n; ++j )
+ s += (j > 0 ? ", " : "") + std::to_string( self[ i ][ j ] );
+ s += std::string( ") ");
+ }
+ return s += ")";
+ });
+ cls.def( "__repr__", [] ( const FM &self ) {
+ return "Dune::FieldMatrix<"+to_string(m)+","+to_string(n)+">(...)";
+ } );
+
+ cls.def_buffer( [] ( FM &self ) -> pybind11::buffer_info {
+ return pybind11::buffer_info(
+ &self[ 0 ][ 0 ], /* Pointer to buffer */
+ sizeof( K ), /* Size of one scalar */
+ pybind11::format_descriptor< K >::value, /* Python struct-style format descriptor */
+ 2, /* Number of dimensions */
+ { m, n }, /* Buffer dimensions */
+ /* Strides (in bytes) for each index */
+ {
+ static_cast< std::size_t >( reinterpret_cast< char * >( &self[ 1 ][ 0 ] ) - reinterpret_cast< char * >( &self[ 0 ][ 0 ] ) ),
+ static_cast< std::size_t >( reinterpret_cast< char * >( &self[ 0 ][ 1 ] ) - reinterpret_cast< char * >( &self[ 0 ][ 0 ] ) )
+ }
+ );
+ } );
+
+ registerDenseMatrix< FM >( cls );
+ }
+
+ template< class K, int m, int n >
+ inline static void registerFieldMatrix ( pybind11::handle scope )
+ {
+ typedef Dune::FieldMatrix< K, m, n > FM;
+
+ auto entry = insertClass<FM>( scope, "FieldMatrix_"+std::to_string(m)+"_"+std::to_string(n), pybind11::buffer_protocol(),
+ GenerateTypeName("Dune::FieldMatrix",Dune::MetaType<K>(),m,n), IncludeFiles{"dune/common/fmatrix.hh"}
+ );
+ if (!entry.second)
+ return;
+ registerFieldMatrix( scope, entry.first );
+ }
+ } // namespace Python
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_PYTHON_COMMON_FMATRIX_HH
--- /dev/null
+#ifndef DUNE_PYTHON_COMMON_FVECMATREG_HH
+#define DUNE_PYTHON_COMMON_FVECMATREG_HH
+
+#include <dune/python/pybind11/pybind11.h>
+#include <dune/python/common/fvector.hh>
+#include <dune/python/common/fmatrix.hh>
+namespace Dune
+{
+ namespace Python
+ {
+ template <class Type>
+ struct registerFieldVecMat;
+
+ template <class K, int size>
+ struct registerFieldVecMat<Dune::FieldVector<K,size>>
+ {
+ static void apply()
+ {
+ pybind11::module scope = pybind11::module::import("dune.common");
+ registerFieldVector<K,size>(scope);
+ }
+ };
+ template< class K, int row, int col >
+ struct registerFieldVecMat<Dune::FieldMatrix<K,row,col>>
+ {
+ static void apply()
+ {
+ pybind11::module scope = pybind11::module::import("dune.common");
+ registerFieldMatrix<K,row,col>(scope);
+ registerFieldVector<K,col>(scope);
+ registerFieldVector<K,row>(scope);
+ }
+ };
+ }
+}
+#endif // DUNE_PYTHON_COMMON_FVECMATREG_HH
--- /dev/null
+// -*- tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_PYTHON_COMMON_FVECTOR_HH
+#define DUNE_PYTHON_COMMON_FVECTOR_HH
+
+#include <cstddef>
+
+#include <algorithm>
+#include <stdexcept>
+#include <string>
+#include <tuple>
+#include <utility>
+
+#include <dune/common/fvector.hh>
+
+#include <dune/python/common/typeregistry.hh>
+#include <dune/python/common/densevector.hh>
+#include <dune/python/common/string.hh>
+#include <dune/python/pybind11/pybind11.h>
+#include <dune/python/pybind11/operators.h>
+
+namespace Dune
+{
+
+ namespace Python
+ {
+
+ // to_string
+ // ---------
+
+ template< class K, int size >
+ inline static std::string to_string ( const FieldVector< K, size > &x )
+ {
+ return "(" + join( ", ", [] ( auto &&x ) { return to_string( x ); }, x.begin(), x.end() ) + ")";
+ }
+
+
+
+ // registerFieldVector
+ // -------------------
+ template< class K, int size, class ...options >
+ void registerFieldVector ( pybind11::handle scope, pybind11::class_<Dune::FieldVector<K,size>, options...> cls )
+ {
+ using pybind11::operator""_a;
+
+ typedef Dune::FieldVector<K, size> FV;
+ cls.def( pybind11::init( [] () { return new FV(); } ) );
+
+ if( size == 1 )
+ {
+ cls.def( pybind11::init( [] ( int a ) { return new FV( K( a ) ); } ) );
+ cls.def( pybind11::init( [] ( K a ) { return new FV( a ); } ) );
+ cls.def( "__float__", [] ( const FV &self ) { return self[ 0 ]; } );
+ pybind11::implicitly_convertible< int, FV >();
+ pybind11::implicitly_convertible< K, FV >();
+ }
+
+ cls.def( pybind11::init( [] ( pybind11::buffer x ) {
+ pybind11::buffer_info info = x.request();
+ if( info.format != pybind11::format_descriptor< K >::format() )
+ throw pybind11::value_error( "Incompatible buffer format." );
+ if( info.ndim != 1 )
+ throw pybind11::value_error( "Only one-dimensional buffers can be converted into FieldVector." );
+ const ssize_t stride = info.strides[ 0 ] / sizeof( K );
+ const ssize_t sz = std::min( static_cast< ssize_t >( size ), info.shape[ 0 ] );
+
+ FV *self = new FV( K( 0 ) );
+ for( ssize_t i = 0; i < sz; ++i )
+ (*self)[ i ] = static_cast< K * >( info.ptr )[ i*stride ];
+ return self;
+ } ), "x"_a );
+
+ cls.def( pybind11::init( [] ( pybind11::tuple x ) {
+ FV *self = new FV( K( 0 ) );
+ const std::size_t sz = std::min( static_cast< std::size_t >( size ), x.size() );
+ // should this fail in case the sizes do not match?
+ for( std::size_t i = 0; i < sz; ++i )
+ (*self)[ i ] = x[ i ].template cast< K >();
+ return self;
+ } ), "x"_a );
+
+ cls.def( pybind11::init( [] ( pybind11::list x ) {
+ FV *self = new FV( K( 0 ) );
+ const std::size_t sz = std::min( static_cast< std::size_t >( size ), x.size() );
+ // should this fail in case the sizes do not match?
+ for( std::size_t i = 0; i < sz; ++i )
+ (*self)[ i ] = x[ i ].template cast< K >();
+ return self;
+ } ), "x"_a );
+
+ cls.def( pybind11::init( [] ( pybind11::args args ) {
+ FV *self = new FV( K( 0 ) );
+ const std::size_t sz = std::min( static_cast< std::size_t >( size ), args.size() );
+ // should this fail in case the sizes do not match?
+ for( std::size_t i = 0; i < sz; ++i )
+ (*self)[ i ] = args[ i ].template cast< K >();
+ return self;
+ } ) );
+
+ pybind11::implicitly_convertible< pybind11::args, FV >();
+ pybind11::implicitly_convertible< pybind11::buffer, FV >();
+
+ cls.def("copy", [](FV& , pybind11::args l) {
+ FV v(K(0));
+ const std::size_t sz = std::min(v.size(), l.size());
+ // should this fail in case the sizes do not match?
+ for (std::size_t i = 0; i < sz; ++i)
+ v[i] = l[i].template cast<K>();
+ return v;
+ });
+
+ cls.def( "__str__", [] ( const FV &self ) { return to_string( self ); } );
+ cls.def( "__repr__", [] ( const FV &self ) {
+ return "Dune::FieldVector<"+to_string(size)+">"+to_string(self);
+ } );
+
+ cls.def_buffer( [] ( FV &self ) -> pybind11::buffer_info {
+ return pybind11::buffer_info(
+ &self[ 0 ], /* Pointer to buffer */
+ sizeof( K ), /* Size of one scalar */
+ pybind11::format_descriptor< K >::format(), /* Python struct-style format descriptor */
+ 1, /* Number of dimensions */
+ { size }, /* Buffer dimensions */
+ /* Strides (in bytes) for each index */
+ {
+ static_cast< std::size_t >( reinterpret_cast< char * >( &self[ 1 ] ) - reinterpret_cast< char * >( &self[ 0 ] ) ) } );
+ }
+ );
+
+ registerDenseVector< FV >( cls );
+ }
+
+ template< class K, int size >
+ void registerFieldVector ( pybind11::handle scope, std::integral_constant< int, size > = {} )
+ {
+ typedef Dune::FieldVector<K, size> FV;
+
+ auto entry = insertClass<FV>(scope, "FieldVector_"+std::to_string(size), pybind11::buffer_protocol(),
+ GenerateTypeName("Dune::FieldVector",MetaType<K>(),size),IncludeFiles{"dune/common/fvector.hh"}
+ );
+ if (!entry.second)
+ return;
+ registerFieldVector(scope, entry.first);
+ }
+
+ template<class K, int... size>
+ void registerFieldVector(pybind11::handle scope, std::integer_sequence<int, size...>)
+ {
+ std::ignore = std::make_tuple((registerFieldVector<K>(scope, std::integral_constant<int, size>()), size)...);
+ }
+
+ } // namespace Python
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_PYTHON_COMMON_FVECTOR_HH
--- /dev/null
+#ifndef DUNE_PYTHON_COMMON_GETDIMENSION_HH
+#define DUNE_PYTHON_COMMON_GETDIMENSION_HH
+
+#include <type_traits>
+
+#include <dune/common/fvector.hh>
+#include <dune/common/fmatrix.hh>
+
+template< class T, class=void >
+struct GetDimension;
+
+template< class T >
+struct GetDimension< T, std::enable_if_t<std::is_arithmetic<T>::value>>
+ : public std::integral_constant< int, 1 > {};
+template< class FT, int dim >
+struct GetDimension<Dune::FieldVector<FT,dim>>
+ : public std::integral_constant< int, dim > {};
+template< class FT, int dimr, int dimc >
+struct GetDimension<Dune::FieldMatrix<FT,dimr,dimc>>
+ : public std::integral_constant< int, dimr*dimc > {};
+
+#endif // DUNE_PYTHON_COMMON_GETDIMENSION_HH
--- /dev/null
+#ifndef DUNE_PYTHON_COMMON_LOGGER_HH
+#define DUNE_PYTHON_COMMON_LOGGER_HH
+
+#include <string>
+#include <utility>
+
+#include <dune/common/visibility.hh>
+
+#include <dune/python/pybind11/pybind11.h>
+
+namespace Dune
+{
+
+ namespace Python
+ {
+
+ // Logger
+ // ------
+
+ struct DUNE_PRIVATE Logger
+ {
+ enum class Level : int
+ {
+ critical = 50,
+ error = 40,
+ warning = 30,
+ info = 20,
+ debug = 10,
+ notSet = 0
+ };
+
+ explicit Logger ( const std::string &name )
+ : logging_( pybind11::module::import( "logging" ) ),
+ logger_( logging_.attr( "getLogger" )( name ) )
+ {}
+
+ template< class... Args >
+ void critical ( const std::string &msg, Args &&... args ) const
+ {
+ log( Level::critical, msg, std::forward< Args >( args )... );
+ }
+
+ template< class... Args >
+ void error ( const std::string &msg, Args &&... args ) const
+ {
+ log( Level::error, msg, std::forward< Args >( args )... );
+ }
+
+ template< class... Args >
+ void warning ( const std::string &msg, Args &&... args ) const
+ {
+ log( Level::warning, msg, std::forward< Args >( args )... );
+ }
+
+ template< class... Args >
+ void info ( const std::string &msg, Args &&... args ) const
+ {
+ log( Level::info, msg, std::forward< Args >( args )... );
+ }
+
+ template< class... Args >
+ void debug ( const std::string &msg, Args &&... args ) const
+ {
+ log( Level::debug, msg, std::forward< Args >( args )... );
+ }
+
+ template< class... Args >
+ void log ( int level, const std::string &msg, Args &&... args ) const
+ {
+ pybind11::object pyLevel = pybind11::int_( level );
+ logger_.attr( "log" )( pyLevel, msg, *pybind11::make_tuple( std::forward< Args >( args )... ) );
+ }
+
+ template< class... Args >
+ void log ( Level level, const std::string &msg, Args &&... args ) const
+ {
+ log( static_cast< int >( level ), msg, std::forward< Args >( args )... );
+ }
+
+ void setLevel ( int level ) { logger_.attr( "setLevel" )( level ); }
+
+ bool isEnabledFor ( int level ) { return pybind11::cast< bool >( logger_.attr( "isEnabledFor" )( level ) ); }
+
+ int getEffectiveLevel () { return pybind11::cast< int >( logger_.attr( "getEffectiveLevel" )() ); }
+
+ private:
+ pybind11::module logging_;
+ pybind11::object logger_;
+ };
+
+ } // namespace Python
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_PYTHON_COMMON_LOGGER_HH
--- /dev/null
+// -*- tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#ifndef DUNE_PYTHON_COMMON_MPIHELPER_HH
+#define DUNE_PYTHON_COMMON_MPIHELPER_HH
+
+#include <config.h>
+
+#include <dune/common/parallel/communication.hh>
+#include <dune/common/parallel/mpihelper.hh>
+
+#include <dune/python/common/typeregistry.hh>
+#include <dune/python/pybind11/pybind11.h>
+
+namespace Dune
+{
+
+ namespace Python
+ {
+
+ // registerCollectiveCommunication
+ // -------------------------------
+
+ template< class Comm, class... objects >
+ inline static void registerCollectiveCommunication ( pybind11::class_< Comm, objects... > cls )
+ {
+ using pybind11::operator""_a;
+
+ cls.def_property_readonly( "rank", &Comm::rank );
+ cls.def_property_readonly( "size", &Comm::size );
+
+ cls.def( "barrier", &Comm::barrier );
+
+ cls.def( "min", [] ( const Comm &self, double x ) { return self.min( x ); }, "x"_a );
+ cls.def( "max", [] ( const Comm &self, double x ) { return self.max( x ); }, "x"_a );
+ cls.def( "sum", [] ( const Comm &self, double x ) { return self.sum( x ); }, "x"_a );
+ }
+
+ inline static void registerCollectiveCommunication ( pybind11::handle scope )
+ {
+ typedef Dune::CollectiveCommunication< Dune::MPIHelper::MPICommunicator > Comm;
+
+ auto typeName = GenerateTypeName( "Dune::CollectiveCommunication", "Dune::MPIHelper::MPICommunicator" );
+ auto includes = IncludeFiles{ "dune/common/parallel/collectivecommunication.hh", "dune/common/parallel/mpihelper.hh" };
+ auto clsComm = insertClass< Comm >( scope, "CollectiveCommunication", typeName, includes );
+ if( clsComm.second )
+ registerCollectiveCommunication( clsComm.first );
+
+ scope.attr( "comm" ) = pybind11::cast( Dune::MPIHelper::getCollectiveCommunication() );
+ }
+
+ } // namespace Python
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_PYTHON_COMMON_MPIHELPER_HH
--- /dev/null
+#if HAVE_DUNE_GRID
+
+#ifndef DUNE_PYTHON_UTILITY_VECTORCOMMDATAHANDLE_HH
+#warning "Deprecated header, #include <dune/python/grid/numpycommdatahandle.hh> instead!"
+#include <dune/python/grid/numpycommdatahandle.hh>
+#endif // #ifndef DUNE_PYTHON_UTILITY_VECTORCOMMDATAHANDLE_HH
+
+#endif // #if HAVE_DUNE_GRID
--- /dev/null
+#ifndef DUNE_PYTHON_COMMON_NUMPYVECTOR_HH
+#define DUNE_PYTHON_COMMON_NUMPYVECTOR_HH
+
+#include <dune/common/densevector.hh>
+#include <dune/common/ftraits.hh>
+
+#include <dune/python/pybind11/numpy.h>
+#include <dune/python/pybind11/pybind11.h>
+#include <dune/python/pybind11/stl.h>
+
+namespace Dune
+{
+
+ namespace Python
+ {
+
+ // Internal Forward Declarations
+ // -----------------------------
+
+ template< class T >
+ class NumPyVector;
+
+ } // namespace Python
+
+
+
+ // DenseMatVecTraits for NumPyVector
+ // ---------------------------------
+
+ template< class T >
+ struct DenseMatVecTraits< Python::NumPyVector< T > >
+ {
+ typedef Python::NumPyVector< T > derived_type;
+ typedef pybind11::array_t< T > container_type;
+ typedef T value_type;
+ typedef std::size_t size_type;
+ };
+
+
+
+ // FieldTraits for NumPyVector
+ // ---------------------------
+
+ template< class T >
+ struct FieldTraits< Python::NumPyVector< T > >
+ {
+ typedef typename FieldTraits< T >::field_type field_type;
+ typedef typename FieldTraits< T >::real_type real_type;
+ };
+
+
+ namespace Python
+ {
+
+ template< class T >
+ class NumPyVector
+ : public DenseVector< NumPyVector< T > >
+ {
+ typedef NumPyVector< T > This;
+ typedef DenseVector< NumPyVector< T > > Base;
+
+ public:
+ typedef typename Base::size_type size_type;
+ typedef typename Base::value_type value_type;
+
+ explicit NumPyVector ( size_type size )
+ : array_( pybind11::buffer_info( nullptr, sizeof( T ),
+ pybind11::format_descriptor< T >::value, 1, { size }, { sizeof( T ) } )
+ ),
+ size_(size)
+ {}
+
+ NumPyVector ( pybind11::buffer buf )
+ : array_( buf ),
+ size_( 0 )
+ {
+ pybind11::buffer_info info = buf.request();
+ if (info.ndim != 1)
+ DUNE_THROW( InvalidStateException, "NumPyVector can only be created from one-dimensional array" );
+ size_ = info.shape[0];
+ }
+
+ NumPyVector ( const This &other ) = delete;
+ NumPyVector ( This &&other ) = delete;
+
+ ~NumPyVector() {}
+
+ This &operator= ( const This &other ) = delete;
+ This &operator= ( This &&other ) = delete;
+
+ operator pybind11::array_t< T > () const { return array_; }
+
+ const value_type &operator[] ( size_type index ) const
+ {
+ return data()[ index ];
+ }
+ value_type &operator[] ( size_type index )
+ {
+ return data()[ index ];
+ }
+ value_type &vec_access ( size_type index )
+ {
+ return data()[ index ];
+ }
+ const value_type &vec_access ( size_type index ) const
+ {
+ return data()[ index ];
+ }
+
+ inline const value_type *data () const
+ {
+ return static_cast< const value_type * >( const_cast<pybind11::array_t< T >&>(array_).request(false).ptr );
+ }
+ inline value_type *data ()
+ {
+ return static_cast< value_type * >( array_.request(true).ptr );
+ }
+ pybind11::array_t< T > &coefficients()
+ {
+ return array_;
+ }
+ const pybind11::array_t< T > &coefficients() const
+ {
+ return array_;
+ }
+
+ size_type size () const
+ {
+ return size_;
+ }
+ size_type vec_size () const
+ {
+ return size_;
+ }
+
+ private:
+ pybind11::array_t< T > array_;
+ size_type size_;
+ };
+
+ } // namespace Python
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_PYTHON_COMMON_NUMPYVECTOR_HH
--- /dev/null
+#ifndef DUNE_PYTHON_COMMON_PYTHONVECTOR_HH
+#define DUNE_PYTHON_COMMON_PYTHONVECTOR_HH
+
+#include <cstddef>
+
+#include <dune/common/densevector.hh>
+
+#include <dune/python/pybind11/pybind11.h>
+
+namespace Dune
+{
+
+ namespace Python
+ {
+
+ // Internal Forward Declarations
+ // -----------------------------
+
+ template< class K >
+ class PythonVector;
+
+ } // namespace Python
+
+
+
+ // DenseMatVecTraits for PythonVector
+ // ----------------------------------
+
+ template< class K >
+ struct DenseMatVecTraits< Python::PythonVector< K > >
+ {
+ typedef Python::PythonVector< K > derived_type;
+ typedef K value_type;
+ typedef std::size_t size_type;
+ };
+
+
+
+ // FieldTraits for PythonVector
+ // ----------------------------
+
+ template< class K >
+ struct FieldTraits< Python::PythonVector< K > >
+ {
+ typedef typename FieldTraits< K >::field_type field_type;
+ typedef typename FieldTraits< K >::real_type real_type;
+ };
+
+
+
+ namespace Python
+ {
+
+ template< class K >
+ class PythonVector
+ : public Dune::DenseVector< PythonVector< K > >
+ {
+ typedef PythonVector< K > This;
+ typedef Dune::DenseVector< PythonVector< K > > Base;
+
+ public:
+ typedef typename Base::size_type size_type;
+ typedef typename Base::field_type field_type;
+
+ explicit PythonVector ( pybind11::buffer buffer )
+ : buffer_( buffer ), info_( buffer_.request() )
+ {
+ if( info_.format != pybind11::format_descriptor< field_type >::format() )
+ throw std::runtime_error( "Incompatible buffer format." );
+ if( info_.ndim != 1 )
+ throw std::runtime_error( "PythonVector can only be instantiated from one-dimensional buffers." );
+ stride_ = info_.strides[ 0 ] / sizeof( field_type );
+ }
+
+ PythonVector ( const This & ) = delete;
+ PythonVector ( This && ) = default;
+
+ This &operator= ( const This & ) = delete;
+ This &operator= ( This && ) = default;
+
+ const field_type &operator[] ( size_type i ) const
+ {
+ return static_cast< const field_type * >( info_.ptr )[ i*stride_ ];
+ }
+
+ field_type &operator[] ( size_type i )
+ {
+ return static_cast< field_type * >( info_.ptr )[ i*stride_ ];
+ }
+
+ size_type size () const { return info_.shape[ 0 ]; }
+
+ private:
+ pybind11::buffer buffer_;
+ pybind11::buffer_info info_;
+ size_type stride_;
+ };
+
+ } // namespace Python
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_PYTHON_COMMON_PYTHONVECTOR_HH
--- /dev/null
+#ifndef DUNE_PYTHON_COMMON_STRING_HH
+#define DUNE_PYTHON_COMMON_STRING_HH
+
+#include <string>
+#include <type_traits>
+
+namespace Dune
+{
+
+ namespace Python
+ {
+
+ using std::to_string;
+
+
+
+ // join
+ // ----
+
+ template< class Formatter, class Iterator >
+ inline static auto join ( const std::string &delimiter, Formatter &&formatter, Iterator begin, Iterator end )
+ -> std::enable_if_t< std::is_same< std::decay_t< decltype( formatter( *begin ) ) >, std::string >::value, std::string >
+ {
+ std::string s;
+ if( begin != end )
+ {
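+ // the delimiter is appended in the loop body; the next formatted element is appended in the increment expression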
+ for( s = formatter( *begin++ ); begin != end; s += formatter( *begin++ ) )
+ s += delimiter;
+ }
+ return s;
+ }
+
+ template< class Iterator >
+ inline static auto join ( const std::string &delimiter, Iterator begin, Iterator end )
+ -> std::enable_if_t< std::is_same< std::decay_t< decltype( *begin ) >, std::string >::value, std::string >
+ {
+ return join( delimiter, [] ( decltype( *begin ) s ) -> decltype( *begin ) { return s; }, begin, end );
+ }
+
+ } // namespace Python
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_PYTHON_COMMON_STRING_HH
--- /dev/null
+#ifndef DUNE_PYTHON_COMMON_TYPEREGISTRY_HH
+#define DUNE_PYTHON_COMMON_TYPEREGISTRY_HH
+
+#include <algorithm>
+#include <cassert>
+#include <iostream>
+#include <iterator>
+#include <string>
+#include <typeindex>
+#include <typeinfo>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include <dune/common/typelist.hh>
+#include <dune/common/visibility.hh>
+
+#include <dune/python/pybind11/pybind11.h>
+#include <dune/python/pybind11/stl.h>
+
+#include <dune/python/pybind11/detail/internals.h>
+
+namespace Dune
+{
+
+ namespace Python
+ {
+
+ namespace detail
+ {
+
+ struct DUNE_PRIVATE Entry
+ {
+ std::string name;
+ std::string pyName;
+ std::vector< std::string > includes;
+ pybind11::object object;
+ };
+
+
+ // using an unordered_map directly for the type registry leads to a compilation
+ // error in the cast used in the typeRegistry function:
+ // assertion failed: Unable to cast type to reference: value is local to type caster
+ struct DUNE_PRIVATE TypeRegistry : public pybind11::detail::type_map<Entry>
+ {};
+
+
+ inline static TypeRegistry &typeRegistry ()
+ {
+ // BUG: Capturing the pybind11::object in a static variable leads to a
+ // memory fault in Python 3.6 upon module unloading.
+ // As a simple fix, we reobtain the reference each time the type
+ // registry is requested.
+#if 0
+ static pybind11::object instance;
+ if( !instance )
+ instance = pybind11::module::import( "dune.typeregistry" ).attr( "typeRegistry" );
+ return pybind11::cast< TypeRegistry & >( instance );
+#endif
+ return pybind11::cast< TypeRegistry & >( pybind11::module::import( "dune.typeregistry" ).attr( "typeRegistry" ) );
+ }
+
+
+ template< class T >
+ inline static auto findInTypeRegistry ()
+ {
+ auto pos = typeRegistry().find( typeid(T) );
+ return std::make_pair( pos, pos == typeRegistry().end() );
+ }
+
+
+ template< class T >
+ inline static auto insertIntoTypeRegistry (
+ const std::string &name,
+ const std::string &pyName,
+ std::vector< std::string > includes )
+ {
+ auto ret = typeRegistry().emplace( typeid(T), Entry() );
+ if( ret.second )
+ {
+ Entry &entry = ret.first->second;
+ entry.name = name;
+ entry.pyName = pyName;
+ entry.includes = std::move( includes );
+ }
+ return ret;
+ }
+
+
+
+ // TypeRegistryTag
+ // ---------------
+
+ struct TypeRegistryTag {};
+
+
+
+ // GenerateTypeName
+ // ----------------
+
+ struct GenerateTypeName
+ : public TypeRegistryTag
+ {
+ template <class... Templ>
+ GenerateTypeName(const std::string &main, Templ... templ)
+ : main_(main)
+ {
+ templates(templ...);
+ }
+ template <class T, class... options, class... Templ>
+ GenerateTypeName(const std::string &outer, const std::string &main, Templ... templ)
+ : main_(outer+"::"+main)
+ {
+ templates(templ...);
+ }
+ template <class... Templ>
+ GenerateTypeName(pybind11::handle &outer, const std::string &main, Templ... templ)
+ {
+ main_ = getTypeName(outer) + "::" + main;
+ includes_.push_back(getIncludes(outer));
+ std::sort( includes_.begin(), includes_.end() );
+ includes_.erase( std::unique( includes_.begin(), includes_.end() ), includes_.end() );
+ templates(templ...);
+ }
+ template <class Outer, class... Templ>
+ GenerateTypeName(Dune::MetaType<Outer>, const std::string &main, Templ... templ)
+ {
+ const auto& outerEntry = findInTypeRegistry<Outer>();
+ if (outerEntry.second)
+ throw std::invalid_argument( (std::string("couldn't find outer class ") +
+ typeid(Outer).name() + " in type registry").c_str() );
+ main_ = outerEntry.first->second.name + "::" + main;
+ includes_.push_back(outerEntry.first->second.includes);
+ std::sort( includes_.begin(), includes_.end() );
+ includes_.erase( std::unique( includes_.begin(), includes_.end() ), includes_.end() );
+ templates(templ...);
+ }
+ GenerateTypeName(const std::string &main, pybind11::args args)
+ : main_(main)
+ {
+ const std::size_t sz = args.size();
+ for( std::size_t i = 0; i < sz; ++i )
+ {
+ templates_.insert( templates_.end(), getTypeName( (pybind11::handle)(args[i]) ) );
+ includes_.insert( includes_.end(), getIncludes( (pybind11::handle)(args[i]) ) );
+ std::sort( includes_.begin(), includes_.end() );
+ includes_.erase( std::unique( includes_.begin(), includes_.end() ), includes_.end() );
+ }
+ }
+
+ std::string name () const
+ {
+ std::string fullName = main_;
+ if( !templates_.empty() )
+ {
+ const char *delim = "< ";
+ for( const auto &t : templates_ )
+ {
+ fullName += delim + t;
+ delim = ", ";
+ }
+ fullName += " >";
+ }
+ return fullName;
+ }
+ std::vector<std::string> includes() const
+ {
+ std::vector<std::string> ret;
+ for (const auto &i : includes_)
+ ret.insert( ret.end(), i.begin(), i.end() );
+ return ret;
+ }
+
+ private:
+ template <class... Args>
+ void templates(Args... args)
+ {
+ templates_.insert(templates_.end(), { getTypeName( std::forward< Args >( args ) )... } );
+ includes_.insert( includes_.end(), { getIncludes( std::forward<Args >( args ) )... } );
+ std::sort( includes_.begin(), includes_.end() );
+ includes_.erase( std::unique( includes_.begin(), includes_.end() ), includes_.end() );
+ }
+ template <class T,class... options>
+ std::string getTypeName(const pybind11::class_<T,options...> &obj)
+ {
+ return getTypeName(static_cast<pybind11::handle>(obj));
+ }
+ std::string getTypeName(const pybind11::object &obj)
+ {
+ return getTypeName(static_cast<pybind11::handle>(obj));
+ }
+ std::string getTypeName(const pybind11::handle &obj)
+ {
+ try
+ {
+ return obj.attr( "_typeName" ).cast<std::string>();
+ }
+ catch(const pybind11::error_already_set& )
+ {
+ return pybind11::str(obj);
+ }
+ }
+ static std::string getTypeName ( const GenerateTypeName &name ) { return name.name(); }
+ std::string getTypeName ( const std::string &name ) { return name; }
+ std::string getTypeName ( const char *name ) { return name; }
+ template <class T>
+ std::string getTypeName ( const Dune::MetaType<T> & )
+ {
+ auto entry = detail::findInTypeRegistry<T>();
+ if (entry.second)
+ throw std::invalid_argument( (std::string("couldn't find requested type ") +
+ typeid(T).name() + " in type registry").c_str() );
+ return entry.first->second.name;
+ }
+ template <class T>
+ std::string getTypeName ( const T& t ) { return std::to_string(t); }
+
+ static std::vector<std::string> getIncludes(pybind11::handle obj)
+ {
+ try
+ {
+ return obj.attr( "_includes" ).cast<std::vector<std::string>>();
+ }
+ catch(const pybind11::error_already_set& )
+ {
+ return {};
+ }
+ }
+ static std::vector< std::string > getIncludes ( const GenerateTypeName &name ) { return name.includes(); }
+ template <class T>
+ const std::vector<std::string> &getIncludes ( const Dune::MetaType<T> & )
+ {
+ auto entry = detail::findInTypeRegistry<T>();
+ if (entry.second)
+ throw std::invalid_argument( (std::string("couldn't find requested type ") +
+ typeid(T).name() + " in type registry").c_str() );
+ return entry.first->second.includes;
+ }
+ template <class T>
+ std::vector<std::string> getIncludes ( const T& ) { return {}; }
+
+ std::string main_;
+ std::vector<std::string> templates_;
+ std::vector<std::vector<std::string>> includes_;
+ };
+
+
+
+ // IncludeFiles
+ // ------------
+
+ struct IncludeFiles
+ : public std::vector< std::string >,
+ TypeRegistryTag
+ {
+ template <class... Args>
+ IncludeFiles(Args... args)
+ : std::vector<std::string>({args...}) {}
+ };
+
+
+ template< class DuneType >
+ inline static auto _addToTypeRegistry ( std::string pyName, const GenerateTypeName &typeName, const std::vector< std::string > &inc = {} )
+ {
+ std::vector< std::string > includes = typeName.includes();
+ includes.insert( includes.end(), inc.begin(), inc.end() );
+ auto entry = detail::insertIntoTypeRegistry< DuneType >( typeName.name(), std::move( pyName ), includes );
+ if( !entry.second )
+ throw std::invalid_argument( std::string( "adding a class (" ) + typeid( DuneType ).name() + ") twice to the type registry" );
+ return entry;
+ }
+
+
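+      // Compile-time filter: filter< indices, mask, output > collects into the output
+      // sequence all indices of the input sequence whose corresponding mask entry is 1.
+      // Filter< F, Args... > applies the predicate F to each argument type and yields
+      // the index_sequence of the positions where F< Arg > is true. It is used below
+      // to split the insertClass arguments into those tagged with TypeRegistryTag and
+      // the remaining pybind11 arguments.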
+ template <typename S, typename M, typename O = std::index_sequence<>>
+ struct filter : O {};
+ template <std::size_t I, std::size_t... Is,
+ std::size_t... Js, std::size_t... Ks>
+ struct filter<std::index_sequence<I, Is...>, std::index_sequence<0, Js...>,
+ std::index_sequence<Ks...>>
+ : filter<std::index_sequence<Is...>, std::index_sequence<Js...>,
+ std::index_sequence<Ks...>> {};
+ template <std::size_t I, std::size_t... Is, std::size_t... Js,
+ std::size_t... Ks>
+ struct filter<std::index_sequence<I, Is...>, std::index_sequence<1, Js...>,
+ std::index_sequence<Ks...>>
+ : filter<std::index_sequence<Is...>, std::index_sequence<Js...>,
+ std::index_sequence<Ks..., I>> {};
+
+ template< template< class T> class F, class... Args >
+ using Filter = filter< std::index_sequence_for< Args... >, std::index_sequence< F< Args >{}... > >;
+
+ template< class DuneType >
+ inline static auto
+ _addToTypeRegistry( const std::string &pyName, const IncludeFiles &inc, const GenerateTypeName &typeName )
+ {
+ return _addToTypeRegistry<DuneType>(std::move(pyName),typeName,inc);
+ }
+
+ template< class DuneType, class... Args, std::size_t... Is >
+ inline static auto
+ _addToTypeRegistry_filter_impl( const std::string &pyName, std::tuple< Args... > &&tuple, std::index_sequence< Is... > )
+ {
+ return _addToTypeRegistry<DuneType>(std::move(pyName),std::get<Is>(std::move(tuple))...);
+ }
+
+ template< class DuneType, class... options, class... Args >
+ inline static auto
+ generateClass ( pybind11::handle scope, const char *name, Args... args )
+ {
+ return pybind11::class_<DuneType,options...>(scope,name,args...);
+ }
+
+ template< class DuneType, class... options, class... Args, std::size_t... Is >
+ inline static auto
+ generateClass_filter_impl ( pybind11::handle scope, const char *name, std::tuple<Args...>&& tuple, std::index_sequence< Is... > )
+ {
+ return generateClass<DuneType,options...>(scope,name,std::get<Is>(std::move(tuple))...);
+ }
+
+ template<class B>
+ struct negation : std::integral_constant<bool,!bool(B::value)> { };
+ template <class T>
+ using baseTag = std::is_base_of<TypeRegistryTag,T>;
+ template <class T>
+ using notBaseTag = negation<baseTag<T>>;
+
+ } // namespace detail
+
+ /** \brief Generate class name as string given the base part plus a list of the
+ * template arguments.
+ *
+ * This class is used to generate a string storing the full C++ class
+   *  name, e.g., 'Foo<A,B>' given the string 'Foo' and the two types 'A'
+ * and 'B'. The template arguments can be given as
+ * - string, bool, or int (anything that works with std::to_string
+ * basically, so even double...)
+ * - Dune::MetaType<T>, in which case the type T must be available in
+ * the type registry
+   *   - pybind11::object, in which case the attribute _typeName must be
+   *     available
+ * .
+ *
+ * In the last two cases the include files stored in the type registry
+ * for this type or attached to the object via the '_includes'
+ * attribute are collected.
+ *
+ * The main constructor is
+ * template <class... Templ>
+ * GenerateTypeName(const std::string &main, Templ... templ)
+ *
+ * Further constructors are available to handle cases like
+ * Bar::Foo<A,B>
+ * The outer type can again be either given as a string, a
+ * Dune::MetaType, or a pybind11::object.
+ *
+ * At the moment constructs like Bar::Traits::Foo or Bar<A>::Foo<B> are
+ * not possible except in the case where the outer type (e.g.
+ * Bar::Traits) can be passed in as a string.
+ *
+ */
+ using GenerateTypeName = detail::GenerateTypeName;
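+
+  // A minimal usage sketch (illustrative only; it assumes a type A that has already
+  // been added to the type registry under the C++ name "A"):
+  //
+  //   GenerateTypeName typeName( "Foo", Dune::MetaType< A >(), 2 );
+  //   typeName.name();     // -> "Foo< A, 2 >"
+  //   typeName.includes(); // -> the include files registered for A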
+
+ /** \brief A class used in the free standing method insertClass to tag
+ * the list of include files in the argument list
+ */
+ using IncludeFiles = detail::IncludeFiles;
+
+ using detail::findInTypeRegistry;
+
+  /** \brief Add a type to the type registry without exporting it to Python
+ */
+ template <class DuneType>
+ inline static void addToTypeRegistry ( const GenerateTypeName &typeName,
+ const std::vector< std::string > &inc = {}
+ )
+ {
+ std::vector<std::string> includes = typeName.includes();
+ includes.insert(includes.end(), inc.begin(), inc.end());
+ detail::insertIntoTypeRegistry<DuneType>(typeName.name(),"",includes);
+ }
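+
+  // For example (sketch; "Foo", the type A, and "foo.hh" are placeholders):
+  //   addToTypeRegistry< Foo< A > >( GenerateTypeName( "Foo", Dune::MetaType< A >() ),
+  //                                  { "foo.hh" } );
+  // makes Foo< A > known to other exported classes without creating a Python class for it.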
+
+
+ /** Function used to generate a new pybind11::class_ object and to add
+ * an entry to the type registry.
+ *
+     *  The type 'Type' passed in as first template argument is exported to
+     *  Python by adding it to the given scope. If the type has already been
+     *  inserted using this method, the previously created pybind11::class_
+     *  object is added to the scope and returned.
+ *
+ * Usage:
+ * \code
+ * auto entry = insertClass<Foo<A>,py11_toption1,py11_toption2>(scope, "Foo",
+ * py11_option1(), py11_option2(),
+ * GenerateTypeName( "Foo", Dune::MetaType<A>() ),
+ * IncludeFiles{ "file1.hh", "file2.hh" } );
+ * if (entry.second)
+ * registerFoo(entry.first);
+     *     return entry.first;
+ * \endcode
+ *
+ * \tparam Type the type of the dune class to export
+     *  \tparam options variadic template arguments passed on to pybind11::class_
+ * \param scope the scope into which to export the python class
+ * \param pyName the name to use for the python export
+     *  \param args      variadic arguments passed on to the constructor of
+     *                    pybind11::class_, with the exception of arguments of
+     *                    type 'GenerateTypeName' and 'IncludeFiles'
+ * \return
+ * \code
+ * make_pair(pybind11::class_<Type,options...>("pyName",args...), isNew);
+ * \endcode
+     *          The first element is the pybind11::class_
+     *          object (either newly created or extracted from the type
+     *          registry). The second element is false if the type was
+     *          already registered and true otherwise.
+ */
+ template< class Type, class... options, class... Args >
+ inline static std::pair< pybind11::class_< Type, options... >, bool >
+ insertClass ( pybind11::handle scope, std::string pyName, Args... args )
+ {
+ auto entry = detail::findInTypeRegistry<Type>();
+ if( !entry.second )
+ {
+ if( scope )
+ scope.attr( pyName.c_str() ) = entry.first->second.object;
+ return std::make_pair( static_cast< pybind11::class_< Type, options... > >( entry.first->second.object ), false );
+ }
+ else
+ {
+ auto entry = detail::_addToTypeRegistry_filter_impl< Type >( std::move( pyName ), std::forward_as_tuple( std::forward< Args >( args )... ), detail::Filter< detail::baseTag, std::decay_t< Args >... >{} );
+ auto cls = detail::generateClass_filter_impl< Type, options...>( scope, entry.first->second.pyName.c_str(), std::forward_as_tuple( std::forward< Args >( args )... ), detail::Filter< detail::notBaseTag, std::decay_t< Args >... >{} );
+ entry.first->second.object = cls;
+
+ cls.def_property_readonly_static( "_typeName", [ entry ] ( pybind11::object ) { return entry.first->second.name; } );
+ cls.def_property_readonly_static( "_includes", [ entry ] ( pybind11::object ) { return entry.first->second.includes; } );
+
+ return std::make_pair( cls, true );
+ }
+ }
+
+
+ // registerTypeRegistry
+ // --------------------
+
+ inline static void registerTypeRegistry ( pybind11::module scope )
+ {
+ using pybind11::operator""_a;
+
+ pybind11::class_< detail::TypeRegistry > cls( scope, "TypeRegistry" );
+
+ scope.attr( "typeRegistry" ) = pybind11::cast( std::make_unique< detail::TypeRegistry >() );
+
+ scope.def( "generateTypeName", []( std::string className, pybind11::args targs ) {
+ GenerateTypeName gtn( className, targs );
+ return std::make_pair( gtn.name(), gtn.includes() );
+ }, "className"_a );
+ }
+
+ } // namespace Python
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_PYTHON_COMMON_TYPEREGISTRY_HH
--- /dev/null
+#ifndef DUNE_PYTHON_COMMON_TYPE_TRAITS_HH
+#define DUNE_PYTHON_COMMON_TYPE_TRAITS_HH
+
+#include <type_traits>
+#include <utility>
+
+#include <dune/common/ftraits.hh>
+#include <dune/common/rangeutilities.hh>
+#include <dune/common/typetraits.hh>
+#include <dune/common/std/type_traits.hh>
+
+#include <dune/python/pybind11/numpy.h>
+#include <dune/python/pybind11/pybind11.h>
+
+namespace Dune
+{
+
+ // External Forward Declarations
+ // -----------------------------
+
+ template< class V >
+ class DenseVector;
+
+ template< class K, int n >
+ class FieldVector;
+
+ template< class K, int m, int n >
+ class FieldMatrix;
+
+ namespace Imp
+ {
+
+ template< class B, class A >
+ class block_vector_unmanaged;
+
+ template< class B, class A >
+ class compressed_block_vector_unmanaged;
+
+ } // namespace Imp
+
+
+
+ namespace Python
+ {
+
+ // IsDenseVector
+ // -------------
+
+ template< class T, class = void >
+ struct IsDenseVector
+ : public std::false_type
+ {};
+
+ template< class T >
+ struct IsDenseVector< T, std::enable_if_t< std::is_base_of< Dune::DenseVector< T >, T >::value > >
+ : public std::true_type
+ {};
+
+
+
+ // IsBlockVector
+ // -------------
+
+ template< class T, class = void >
+ struct IsBlockVector
+ : public std::false_type
+ {};
+
+ template< class T >
+ struct IsBlockVector< T, std::enable_if_t< std::is_base_of< Imp::block_vector_unmanaged< typename T::block_type, typename T::allocator_type >, T >::value > >
+ : public std::true_type
+ {};
+
+ template< class T >
+ struct IsBlockVector< T, std::enable_if_t< std::is_base_of< Imp::compressed_block_vector_unmanaged< typename T::block_type, typename T::allocator_type >, T >::value > >
+ : public std::true_type
+ {};
+
+
+
+ // IsOneTensor
+ // -----------
+
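+    // A "one tensor" here is a rank-1 container: any dense vector, or a
+    // (compressed) block vector whose block type is itself a one tensor.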
+ template< class T, class = void >
+ struct IsOneTensor
+ : public std::false_type
+ {};
+
+ template< class T >
+ struct IsOneTensor< T, std::enable_if_t< IsDenseVector< T >::value > >
+ : public std::true_type
+ {};
+
+ template< class T >
+ struct IsOneTensor< T, std::enable_if_t< IsBlockVector< T >::value && IsOneTensor< typename T::block_type >::value > >
+ : public std::true_type
+ {};
+
+
+
+ namespace detail
+ {
+
+ // registerOneTensorInterface
+ // --------------------------
+
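+      // Adds __mul__/__rmul__ and the usual norm properties (one_norm, two_norm,
+      // infinity_norm, ...) to the Python class when T models a one tensor; the
+      // PriorityTag dispatch falls back to the empty overload for all other types.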
+ template< class T, class... options >
+ inline static auto registerOneTensorInterface ( pybind11::class_< T, options... > cls, PriorityTag< 1 > )
+ -> std::enable_if_t< IsOneTensor< T >::value >
+ {
+ cls.def( "__mul__", [] ( const T &self, const T &other ) { return self * other; } );
+ cls.def( "__rmul__", [] ( const T &self, const T &other ) { return self * other; } );
+
+ cls.def_property_readonly( "one_norm", [] ( const T &self ) { return self.one_norm(); } );
+ cls.def_property_readonly( "one_norm_real", [] ( const T &self ) { return self.one_norm_real(); } );
+ cls.def_property_readonly( "two_norm", [] ( const T &self ) { return self.two_norm(); } );
+ cls.def_property_readonly( "two_norm2", [] ( const T &self ) { return self.two_norm2(); } );
+ cls.def_property_readonly( "infinity_norm", [] ( const T &self ) { return self.infinity_norm(); } );
+ cls.def_property_readonly( "infinity_norm_real", [] ( const T &self ) { return self.infinity_norm_real(); } );
+ }
+
+ template< class T, class... options >
+ inline static void registerOneTensorInterface ( pybind11::class_< T, options... > cls, PriorityTag< 0 > )
+ {}
+
+ template< class T, class... options >
+ inline static void registerOneTensorInterface ( pybind11::class_< T, options... > cls )
+ {
+ registerOneTensorInterface( cls, PriorityTag< 42 >() );
+ }
+
+ } // namespace detail
+
+
+
+ // FixedTensorTraits
+ // -----------------
+
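+    // Compile-time numpy shape of small fixed-size types: numbers have shape (),
+    // FieldVector< K, n > has shape (n,), and FieldMatrix< K, m, n > has shape (m, n).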
+ template< class T, class = void >
+ struct FixedTensorTraits;
+
+ template< class T >
+ struct FixedTensorTraits< T, std::enable_if_t< IsNumber< T >::value > >
+ {
+ static constexpr std::array< ssize_t, 0 > shape () noexcept { return {{}}; }
+ };
+
+ template< class K, int n >
+ struct FixedTensorTraits< FieldVector< K, n >, void >
+ {
+ static constexpr std::array< ssize_t, 1 > shape () noexcept { return {{ n }}; }
+ };
+
+ template< class K, int m, int n >
+ struct FixedTensorTraits< FieldMatrix< K, m, n >, void >
+ {
+ static constexpr std::array< ssize_t, 2 > shape () noexcept { return {{ m, n }}; }
+ };
+
+
+
+ // extendArray
+ // -----------
+
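+    // Appends additional entries to a fixed-size array, e.g. extending the static
+    // shape {{ m, n }} by the vectorization length to {{ m, n, size }} (see
+    // vectorize below).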
+ template< std::size_t... i, class T, class... X >
+ inline static constexpr auto extendArray ( std::index_sequence< i... >, const std::array< T, sizeof...( i ) > &array, X &&... x )
+ -> std::enable_if_t< std::conjunction< std::is_convertible< X, T >... >::value, std::array< T, sizeof...( i )+sizeof...( X ) > >
+ {
+ return {{ array[ i ]..., std::forward< X >( x )... }};
+ }
+
+ template< class T, std::size_t n, class... X >
+ inline static constexpr std::array< T, n+sizeof...( X ) > extendArray ( const std::array< T, n > &array, X &&... x )
+ {
+ return extendArray( std::make_index_sequence< n >(), array, std::forward< X >( x )... );
+ }
+
+
+
+ // getFixedTensor
+ // --------------
+
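+    // Copies the entries of a fixed-size tensor x out of a numpy "unchecked" proxy y,
+    // recursing over the static shape j; the trailing indices i... select the slice
+    // in the vectorized case. setFixedTensor below performs the converse assignment.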
+ template< std::size_t... k, class X, class Y, std::size_t n, class... I >
+ inline static auto getFixedTensor ( std::index_sequence< k... >, X &x, const Y &y, std::array< ssize_t, n > j, I... i )
+ -> std::enable_if_t< (sizeof...( k ) == n) >
+ {
+ x = y( j[ k ]..., i... );
+ }
+
+ template< std::size_t... k, class X, class Y, std::size_t n, class... I >
+ inline static auto getFixedTensor ( std::index_sequence< k... >, X &x, const Y &y, std::array< ssize_t, n > j, I... i )
+ -> std::enable_if_t< (sizeof...( k ) < n) >
+ {
+ ssize_t &it = j[ sizeof...( k ) ];
+ ssize_t end = it;
+ for( it = 0; it < end; ++it )
+ getFixedTensor( std::index_sequence< k..., sizeof...( k ) >(), x[ it ], y, j, i... );
+ }
+
+ template< class X, class Y, class... I >
+ inline static auto getFixedTensor ( X &x, const Y &y, I... i )
+ {
+ getFixedTensor( std::index_sequence<>(), x, y, FixedTensorTraits< X >::shape(), i... );
+ }
+
+
+
+ // setFixedTensor
+ // --------------
+
+ template< std::size_t... k, class X, class Y, std::size_t n, class... I >
+ inline static auto setFixedTensor ( std::index_sequence< k... >, const X &x, Y &y, std::array< ssize_t, n > j, I... i )
+ -> std::enable_if_t< (sizeof...( k ) == n) >
+ {
+ y( j[ k ]..., i... ) = x;
+ }
+
+ template< std::size_t... k, class X, class Y, std::size_t n, class... I >
+ inline static auto setFixedTensor ( std::index_sequence< k... >, const X &x, Y &y, std::array< ssize_t, n > j, I... i )
+ -> std::enable_if_t< (sizeof...( k ) < n) >
+ {
+ ssize_t &it = j[ sizeof...( k ) ];
+ ssize_t end = it;
+ for( it = 0; it < end; ++it )
+ setFixedTensor( std::index_sequence< k..., sizeof...( k ) >(), x[ it ], y, j, i... );
+ }
+
+ template< class X, class Y, class... I >
+ inline static auto setFixedTensor ( const X &x, Y &y, I... i )
+ {
+ setFixedTensor( std::index_sequence<>(), x, y, FixedTensorTraits< X >::shape(), i... );
+ }
+
+
+
+ // vectorize
+ // ---------
+
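+    // Wraps a C++ callable f taking a fixed-size tensor argument X so that it can be
+    // called from Python either with a single tensor (array of shape shape(X)) or in
+    // a vectorized fashion with an array carrying one extra trailing dimension, in
+    // which case f is applied to every slice and the results are collected into a
+    // numpy array.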
+ template< class F, class Y, class X >
+ inline static pybind11::object vectorize ( F &&f, Y (*)( X ), pybind11::array_t< typename FieldTraits< std::decay_t< X > >::field_type > xArray )
+ {
+ const auto xShape = FixedTensorTraits< std::decay_t< X > >::shape();
+
+ auto x = xArray.unchecked();
+ if( (std::size_t)x.ndim() < xShape.size() )
+ throw pybind11::value_error( "Tensor has too few dimensions" );
+
+ for( auto i : range( xShape.size() ) )
+ {
+ if( x.shape( i ) != xShape[ i ] )
+ throw pybind11::value_error( "Tensor has wrong shape" );
+ }
+
+ std::decay_t< X > xi;
+ if( (xShape.size() > 0) && (x.ndim() == xShape.size()) )
+ {
+ getFixedTensor( xi, x );
+ return pybind11::cast( f( xi ) );
+ }
+ else if( x.ndim() == xShape.size() + 1 )
+ {
+ const ssize_t size = x.shape( xShape.size() );
+ const auto yShape = extendArray( FixedTensorTraits< std::decay_t< Y > >::shape(), size );
+
+ pybind11::array_t< typename FieldTraits< std::decay_t< Y > >::field_type > yArray( yShape );
+ auto y = yArray.template mutable_unchecked< yShape.size() >();
+
+ for( auto i : range( size ) )
+ {
+ getFixedTensor( xi, x, i );
+ setFixedTensor( f( xi ), y, i );
+ }
+ return std::move(yArray);
+ }
+ else
+ throw pybind11::value_error( "Tensor has too many dimensions" );
+ }
+
+ template< class F, class X >
+ inline static auto vectorize ( F &&f, pybind11::array_t< X > xArray )
+ -> decltype( vectorize( std::forward< F >( f ), static_cast< pybind11::detail::function_signature_t< F > * >( nullptr ), std::move( xArray ) ) )
+ {
+ return vectorize( std::forward< F >( f ), static_cast< pybind11::detail::function_signature_t< F > * >( nullptr ), std::move( xArray ) );
+ }
+
+ } // namespace Python
+
+} // namespace Dune
+
+#endif // #ifndef DUNE_PYTHON_COMMON_TYPE_TRAITS_HH
--- /dev/null
+add_subdirectory(detail)
+
+SET(HEADERS
+ attr.h
+ buffer_info.h
+ cast.h
+ chrono.h
+ common.h
+ complex.h
+ eigen.h
+ embed.h
+ eval.h
+ extensions.h
+ functional.h
+ iostream.h
+ numpy.h
+ operators.h
+ options.h
+ pybind11.h
+ pytypes.h
+ stl_bind.h
+ stl.h
+)
+
+install(FILES ${HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dune/python/pybind11)
--- /dev/null
+Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Please also refer to the file CONTRIBUTING.md, which clarifies licensing of
+external contributions to this project including patches, pull requests, etc.
--- /dev/null
+# -*- coding: utf-8 -*-
+
+
+def _to_int(s):
+ try:
+ return int(s)
+ except ValueError:
+ return s
+
+
+__version__ = "2.6.1"
+version_info = tuple(_to_int(s) for s in __version__.split("."))
--- /dev/null
+/*
+ pybind11/attr.h: Infrastructure for processing custom
+ type and function attributes
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "cast.h"
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+
+/// \addtogroup annotations
+/// @{
+
+/// Annotation for methods
+struct is_method { handle class_; is_method(const handle &c) : class_(c) { } };
+
+/// Annotation for operators
+struct is_operator { };
+
+/// Annotation for classes that cannot be subclassed
+struct is_final { };
+
+/// Annotation for parent scope
+struct scope { handle value; scope(const handle &s) : value(s) { } };
+
+/// Annotation for documentation
+struct doc { const char *value; doc(const char *value) : value(value) { } };
+
+/// Annotation for function names
+struct name { const char *value; name(const char *value) : value(value) { } };
+
+/// Annotation indicating that a function is an overload associated with a given "sibling"
+struct sibling { handle value; sibling(const handle &value) : value(value.ptr()) { } };
+
+/// Annotation indicating that a class derives from another given type
+template <typename T> struct base {
+
+ PYBIND11_DEPRECATED("base<T>() was deprecated in favor of specifying 'T' as a template argument to class_")
+ base() { } // NOLINT(modernize-use-equals-default): breaks MSVC 2015 when adding an attribute
+};
+
+/// Keep patient alive while nurse lives
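+/// (e.g. py::keep_alive<1, 2>() ties the lifetime of argument 2, the patient, to that
+/// of argument 1, the nurse; index 0 refers to the return value)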
+template <size_t Nurse, size_t Patient> struct keep_alive { };
+
+/// Annotation indicating that a class is involved in a multiple inheritance relationship
+struct multiple_inheritance { };
+
+/// Annotation which enables dynamic attributes, i.e. adds `__dict__` to a class
+struct dynamic_attr { };
+
+/// Annotation which enables the buffer protocol for a type
+struct buffer_protocol { };
+
+/// Annotation which requests that a special metaclass is created for a type
+struct metaclass {
+ handle value;
+
+ PYBIND11_DEPRECATED("py::metaclass() is no longer required. It's turned on by default now.")
+ metaclass() { } // NOLINT(modernize-use-equals-default): breaks MSVC 2015 when adding an attribute
+
+ /// Override pybind11's default metaclass
+ explicit metaclass(handle value) : value(value) { }
+};
+
+/// Annotation that marks a class as local to the module:
+struct module_local { const bool value; constexpr module_local(bool v = true) : value(v) { } };
+
+/// Annotation to mark enums as an arithmetic type
+struct arithmetic { };
+
+/// Mark a function for addition at the beginning of the existing overload chain instead of the end
+struct prepend { };
+
+/** \rst
+ A call policy which places one or more guard variables (``Ts...``) around the function call.
+
+ For example, this definition:
+
+ .. code-block:: cpp
+
+ m.def("foo", foo, py::call_guard<T>());
+
+ is equivalent to the following pseudocode:
+
+ .. code-block:: cpp
+
+ m.def("foo", [](args...) {
+ T scope_guard;
+ return foo(args...); // forwarded arguments
+ });
+ \endrst */
+template <typename... Ts> struct call_guard;
+
+template <> struct call_guard<> { using type = detail::void_type; };
+
+template <typename T>
+struct call_guard<T> {
+ static_assert(std::is_default_constructible<T>::value,
+ "The guard type must be default constructible");
+
+ using type = T;
+};
+
+template <typename T, typename... Ts>
+struct call_guard<T, Ts...> {
+ struct type {
+ T guard{}; // Compose multiple guard types with left-to-right default-constructor order
+ typename call_guard<Ts...>::type next{};
+ };
+};
+
+/// @} annotations
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+/* Forward declarations */
+enum op_id : int;
+enum op_type : int;
+struct undefined_t;
+template <op_id id, op_type ot, typename L = undefined_t, typename R = undefined_t> struct op_;
+inline void keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret);
+
+/// Internal data structure which holds metadata about a keyword argument
+struct argument_record {
+ const char *name; ///< Argument name
+ const char *descr; ///< Human-readable version of the argument value
+ handle value; ///< Associated Python object
+ bool convert : 1; ///< True if the argument is allowed to convert when loading
+ bool none : 1; ///< True if None is allowed when loading
+
+ argument_record(const char *name, const char *descr, handle value, bool convert, bool none)
+ : name(name), descr(descr), value(value), convert(convert), none(none) { }
+};
+
+/// Internal data structure which holds metadata about a bound function (signature, overloads, etc.)
+struct function_record {
+ function_record()
+ : is_constructor(false), is_new_style_constructor(false), is_stateless(false),
+ is_operator(false), is_method(false), has_args(false),
+ has_kwargs(false), has_kw_only_args(false), prepend(false) { }
+
+ /// Function name
+ char *name = nullptr; /* why no C++ strings? They generate heavier code.. */
+
+ // User-specified documentation string
+ char *doc = nullptr;
+
+ /// Human-readable version of the function signature
+ char *signature = nullptr;
+
+ /// List of registered keyword arguments
+ std::vector<argument_record> args;
+
+ /// Pointer to lambda function which converts arguments and performs the actual call
+ handle (*impl) (function_call &) = nullptr;
+
+ /// Storage for the wrapped function pointer and captured data, if any
+ void *data[3] = { };
+
+ /// Pointer to custom destructor for 'data' (if needed)
+ void (*free_data) (function_record *ptr) = nullptr;
+
+ /// Return value policy associated with this function
+ return_value_policy policy = return_value_policy::automatic;
+
+ /// True if name == '__init__'
+ bool is_constructor : 1;
+
+ /// True if this is a new-style `__init__` defined in `detail/init.h`
+ bool is_new_style_constructor : 1;
+
+ /// True if this is a stateless function pointer
+ bool is_stateless : 1;
+
+ /// True if this is an operator (__add__), etc.
+ bool is_operator : 1;
+
+ /// True if this is a method
+ bool is_method : 1;
+
+ /// True if the function has a '*args' argument
+ bool has_args : 1;
+
+ /// True if the function has a '**kwargs' argument
+ bool has_kwargs : 1;
+
+ /// True once a 'py::kw_only' is encountered (any following args are keyword-only)
+ bool has_kw_only_args : 1;
+
+ /// True if this function is to be inserted at the beginning of the overload resolution chain
+ bool prepend : 1;
+
+ /// Number of arguments (including py::args and/or py::kwargs, if present)
+ std::uint16_t nargs;
+
+ /// Number of trailing arguments (counted in `nargs`) that are keyword-only
+ std::uint16_t nargs_kw_only = 0;
+
+ /// Number of leading arguments (counted in `nargs`) that are positional-only
+ std::uint16_t nargs_pos_only = 0;
+
+ /// Python method object
+ PyMethodDef *def = nullptr;
+
+ /// Python handle to the parent scope (a class or a module)
+ handle scope;
+
+ /// Python handle to the sibling function representing an overload chain
+ handle sibling;
+
+ /// Pointer to next overload
+ function_record *next = nullptr;
+};
+
+/// Special data structure which (temporarily) holds metadata about a bound class
+struct type_record {
+ PYBIND11_NOINLINE type_record()
+ : multiple_inheritance(false), dynamic_attr(false), buffer_protocol(false),
+ default_holder(true), module_local(false), is_final(false) { }
+
+ /// Handle to the parent scope
+ handle scope;
+
+ /// Name of the class
+ const char *name = nullptr;
+
+ // Pointer to RTTI type_info data structure
+ const std::type_info *type = nullptr;
+
+ /// How large is the underlying C++ type?
+ size_t type_size = 0;
+
+ /// What is the alignment of the underlying C++ type?
+ size_t type_align = 0;
+
+ /// How large is the type's holder?
+ size_t holder_size = 0;
+
+ /// The global operator new can be overridden with a class-specific variant
+ void *(*operator_new)(size_t) = nullptr;
+
+ /// Function pointer to class_<..>::init_instance
+ void (*init_instance)(instance *, const void *) = nullptr;
+
+ /// Function pointer to class_<..>::dealloc
+ void (*dealloc)(detail::value_and_holder &) = nullptr;
+
+ /// List of base classes of the newly created type
+ list bases;
+
+ /// Optional docstring
+ const char *doc = nullptr;
+
+ /// Custom metaclass (optional)
+ handle metaclass;
+
+ /// Multiple inheritance marker
+ bool multiple_inheritance : 1;
+
+ /// Does the class manage a __dict__?
+ bool dynamic_attr : 1;
+
+ /// Does the class implement the buffer protocol?
+ bool buffer_protocol : 1;
+
+ /// Is the default (unique_ptr) holder type used?
+ bool default_holder : 1;
+
+ /// Is the class definition local to the module shared object?
+ bool module_local : 1;
+
+ /// Is the class inheritable from python classes?
+ bool is_final : 1;
+
+ PYBIND11_NOINLINE void add_base(const std::type_info &base, void *(*caster)(void *)) {
+ auto base_info = detail::get_type_info(base, false);
+ if (!base_info) {
+ std::string tname(base.name());
+ detail::clean_type_id(tname);
+ pybind11_fail("generic_type: type \"" + std::string(name) +
+ "\" referenced unknown base type \"" + tname + "\"");
+ }
+
+ if (default_holder != base_info->default_holder) {
+ std::string tname(base.name());
+ detail::clean_type_id(tname);
+ pybind11_fail("generic_type: type \"" + std::string(name) + "\" " +
+ (default_holder ? "does not have" : "has") +
+ " a non-default holder type while its base \"" + tname + "\" " +
+ (base_info->default_holder ? "does not" : "does"));
+ }
+
+ bases.append((PyObject *) base_info->type);
+
+ if (base_info->type->tp_dictoffset != 0)
+ dynamic_attr = true;
+
+ if (caster)
+ base_info->implicit_casts.emplace_back(type, caster);
+ }
+};
+
+inline function_call::function_call(const function_record &f, handle p) :
+ func(f), parent(p) {
+ args.reserve(f.nargs);
+ args_convert.reserve(f.nargs);
+}
+
+/// Tag for a new-style `__init__` defined in `detail/init.h`
+struct is_new_style_constructor { };
+
+/**
+ * Partial template specializations to process custom attributes provided to
+ * cpp_function_ and class_. These are either used to initialize the respective
+ * fields in the type_record and function_record data structures or executed at
+ * runtime to deal with custom call policies (e.g. keep_alive).
+ */
+template <typename T, typename SFINAE = void> struct process_attribute;
+
+template <typename T> struct process_attribute_default {
+ /// Default implementation: do nothing
+ static void init(const T &, function_record *) { }
+ static void init(const T &, type_record *) { }
+ static void precall(function_call &) { }
+ static void postcall(function_call &, handle) { }
+};
+
+/// Process an attribute specifying the function's name
+template <> struct process_attribute<name> : process_attribute_default<name> {
+ static void init(const name &n, function_record *r) { r->name = const_cast<char *>(n.value); }
+};
+
+/// Process an attribute specifying the function's docstring
+template <> struct process_attribute<doc> : process_attribute_default<doc> {
+ static void init(const doc &n, function_record *r) { r->doc = const_cast<char *>(n.value); }
+};
+
+/// Process an attribute specifying the function's docstring (provided as a C-style string)
+template <> struct process_attribute<const char *> : process_attribute_default<const char *> {
+ static void init(const char *d, function_record *r) { r->doc = const_cast<char *>(d); }
+ static void init(const char *d, type_record *r) { r->doc = const_cast<char *>(d); }
+};
+template <> struct process_attribute<char *> : process_attribute<const char *> { };
+
+/// Process an attribute indicating the function's return value policy
+template <> struct process_attribute<return_value_policy> : process_attribute_default<return_value_policy> {
+ static void init(const return_value_policy &p, function_record *r) { r->policy = p; }
+};
+
+/// Process an attribute which indicates that this is an overloaded function associated with a given sibling
+template <> struct process_attribute<sibling> : process_attribute_default<sibling> {
+ static void init(const sibling &s, function_record *r) { r->sibling = s.value; }
+};
+
+/// Process an attribute which indicates that this function is a method
+template <> struct process_attribute<is_method> : process_attribute_default<is_method> {
+ static void init(const is_method &s, function_record *r) { r->is_method = true; r->scope = s.class_; }
+};
+
+/// Process an attribute which indicates the parent scope of a method
+template <> struct process_attribute<scope> : process_attribute_default<scope> {
+ static void init(const scope &s, function_record *r) { r->scope = s.value; }
+};
+
+/// Process an attribute which indicates that this function is an operator
+template <> struct process_attribute<is_operator> : process_attribute_default<is_operator> {
+ static void init(const is_operator &, function_record *r) { r->is_operator = true; }
+};
+
+template <> struct process_attribute<is_new_style_constructor> : process_attribute_default<is_new_style_constructor> {
+ static void init(const is_new_style_constructor &, function_record *r) { r->is_new_style_constructor = true; }
+};
+
+inline void process_kw_only_arg(const arg &a, function_record *r) {
+ if (!a.name || strlen(a.name) == 0)
+        pybind11_fail("arg(): cannot specify an unnamed argument after a kw_only() annotation");
+ ++r->nargs_kw_only;
+}
+
+/// Process a keyword argument attribute (*without* a default value)
+template <> struct process_attribute<arg> : process_attribute_default<arg> {
+ static void init(const arg &a, function_record *r) {
+ if (r->is_method && r->args.empty())
+ r->args.emplace_back("self", nullptr, handle(), true /*convert*/, false /*none not allowed*/);
+ r->args.emplace_back(a.name, nullptr, handle(), !a.flag_noconvert, a.flag_none);
+
+ if (r->has_kw_only_args) process_kw_only_arg(a, r);
+ }
+};
+
+/// Process a keyword argument attribute (*with* a default value)
+template <> struct process_attribute<arg_v> : process_attribute_default<arg_v> {
+ static void init(const arg_v &a, function_record *r) {
+ if (r->is_method && r->args.empty())
+ r->args.emplace_back("self", nullptr /*descr*/, handle() /*parent*/, true /*convert*/, false /*none not allowed*/);
+
+ if (!a.value) {
+#if !defined(NDEBUG)
+ std::string descr("'");
+ if (a.name) descr += std::string(a.name) + ": ";
+ descr += a.type + "'";
+ if (r->is_method) {
+ if (r->name)
+ descr += " in method '" + (std::string) str(r->scope) + "." + (std::string) r->name + "'";
+ else
+ descr += " in method of '" + (std::string) str(r->scope) + "'";
+ } else if (r->name) {
+ descr += " in function '" + (std::string) r->name + "'";
+ }
+ pybind11_fail("arg(): could not convert default argument "
+ + descr + " into a Python object (type not registered yet?)");
+#else
+ pybind11_fail("arg(): could not convert default argument "
+ "into a Python object (type not registered yet?). "
+ "Compile in debug mode for more information.");
+#endif
+ }
+ r->args.emplace_back(a.name, a.descr, a.value.inc_ref(), !a.flag_noconvert, a.flag_none);
+
+ if (r->has_kw_only_args) process_kw_only_arg(a, r);
+ }
+};
+
+/// Process a keyword-only-arguments-follow pseudo argument
+template <> struct process_attribute<kw_only> : process_attribute_default<kw_only> {
+ static void init(const kw_only &, function_record *r) {
+ r->has_kw_only_args = true;
+ }
+};
+
+/// Process a positional-only-argument maker
+template <> struct process_attribute<pos_only> : process_attribute_default<pos_only> {
+ static void init(const pos_only &, function_record *r) {
+ r->nargs_pos_only = static_cast<std::uint16_t>(r->args.size());
+ }
+};
+
+/// Process a parent class attribute. Single inheritance only (class_ itself already guarantees that)
+template <typename T>
+struct process_attribute<T, enable_if_t<is_pyobject<T>::value>> : process_attribute_default<handle> {
+ static void init(const handle &h, type_record *r) { r->bases.append(h); }
+};
+
+/// Process a parent class attribute (deprecated, does not support multiple inheritance)
+template <typename T>
+struct process_attribute<base<T>> : process_attribute_default<base<T>> {
+ static void init(const base<T> &, type_record *r) { r->add_base(typeid(T), nullptr); }
+};
+
+/// Process a multiple inheritance attribute
+template <>
+struct process_attribute<multiple_inheritance> : process_attribute_default<multiple_inheritance> {
+ static void init(const multiple_inheritance &, type_record *r) { r->multiple_inheritance = true; }
+};
+
+template <>
+struct process_attribute<dynamic_attr> : process_attribute_default<dynamic_attr> {
+ static void init(const dynamic_attr &, type_record *r) { r->dynamic_attr = true; }
+};
+
+template <>
+struct process_attribute<is_final> : process_attribute_default<is_final> {
+ static void init(const is_final &, type_record *r) { r->is_final = true; }
+};
+
+template <>
+struct process_attribute<buffer_protocol> : process_attribute_default<buffer_protocol> {
+ static void init(const buffer_protocol &, type_record *r) { r->buffer_protocol = true; }
+};
+
+template <>
+struct process_attribute<metaclass> : process_attribute_default<metaclass> {
+ static void init(const metaclass &m, type_record *r) { r->metaclass = m.value; }
+};
+
+template <>
+struct process_attribute<module_local> : process_attribute_default<module_local> {
+ static void init(const module_local &l, type_record *r) { r->module_local = l.value; }
+};
+
+/// Process a 'prepend' attribute, putting this at the beginning of the overload chain
+template <>
+struct process_attribute<prepend> : process_attribute_default<prepend> {
+ static void init(const prepend &, function_record *r) { r->prepend = true; }
+};
+
+/// Process an 'arithmetic' attribute for enums (does nothing here)
+template <>
+struct process_attribute<arithmetic> : process_attribute_default<arithmetic> {};
+
+template <typename... Ts>
+struct process_attribute<call_guard<Ts...>> : process_attribute_default<call_guard<Ts...>> { };
+
+/**
+ * Process a keep_alive call policy -- invokes keep_alive_impl during the
+ * pre-call handler if both Nurse, Patient != 0 and use the post-call handler
+ * otherwise
+ */
+template <size_t Nurse, size_t Patient> struct process_attribute<keep_alive<Nurse, Patient>> : public process_attribute_default<keep_alive<Nurse, Patient>> {
+ template <size_t N = Nurse, size_t P = Patient, enable_if_t<N != 0 && P != 0, int> = 0>
+ static void precall(function_call &call) { keep_alive_impl(Nurse, Patient, call, handle()); }
+ template <size_t N = Nurse, size_t P = Patient, enable_if_t<N != 0 && P != 0, int> = 0>
+ static void postcall(function_call &, handle) { }
+ template <size_t N = Nurse, size_t P = Patient, enable_if_t<N == 0 || P == 0, int> = 0>
+ static void precall(function_call &) { }
+ template <size_t N = Nurse, size_t P = Patient, enable_if_t<N == 0 || P == 0, int> = 0>
+ static void postcall(function_call &call, handle ret) { keep_alive_impl(Nurse, Patient, call, ret); }
+};
+
+/// Recursively iterate over variadic template arguments
+template <typename... Args> struct process_attributes {
+ static void init(const Args&... args, function_record *r) {
+ int unused[] = { 0, (process_attribute<typename std::decay<Args>::type>::init(args, r), 0) ... };
+ ignore_unused(unused);
+ }
+ static void init(const Args&... args, type_record *r) {
+ int unused[] = { 0, (process_attribute<typename std::decay<Args>::type>::init(args, r), 0) ... };
+ ignore_unused(unused);
+ }
+ static void precall(function_call &call) {
+ int unused[] = { 0, (process_attribute<typename std::decay<Args>::type>::precall(call), 0) ... };
+ ignore_unused(unused);
+ }
+ static void postcall(function_call &call, handle fn_ret) {
+ int unused[] = { 0, (process_attribute<typename std::decay<Args>::type>::postcall(call, fn_ret), 0) ... };
+ ignore_unused(unused);
+ }
+};
+
+template <typename T>
+using is_call_guard = is_instantiation<call_guard, T>;
+
+/// Extract the ``type`` from the first `call_guard` in `Extras...` (or `void_type` if none found)
+template <typename... Extra>
+using extract_guard_t = typename exactly_one_t<is_call_guard, call_guard<>, Extra...>::type;
+
+/// Check the number of named arguments at compile time
+template <typename... Extra,
+ size_t named = constexpr_sum(std::is_base_of<arg, Extra>::value...),
+ size_t self = constexpr_sum(std::is_same<is_method, Extra>::value...)>
+constexpr bool expected_num_args(size_t nargs, bool has_args, bool has_kwargs) {
+ return named == 0 || (self + named + has_args + has_kwargs) == nargs;
+}
+
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+/*
+ pybind11/buffer_info.h: Python buffer object interface
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "detail/common.h"
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
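+// Worked example (illustrative): for shape {2, 3} and itemsize 8, c_strides yields
+// {24, 8} (row-major) while f_strides yields {8, 16} (column-major).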
+// Default, C-style strides
+inline std::vector<ssize_t> c_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
+ auto ndim = shape.size();
+ std::vector<ssize_t> strides(ndim, itemsize);
+ if (ndim > 0)
+ for (size_t i = ndim - 1; i > 0; --i)
+ strides[i - 1] = strides[i] * shape[i];
+ return strides;
+}
+
+// F-style strides; default when constructing an array_t with `ExtraFlags & f_style`
+inline std::vector<ssize_t> f_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
+ auto ndim = shape.size();
+ std::vector<ssize_t> strides(ndim, itemsize);
+ for (size_t i = 1; i < ndim; ++i)
+ strides[i] = strides[i - 1] * shape[i - 1];
+ return strides;
+}
+
+PYBIND11_NAMESPACE_END(detail)
+
+/// Information record describing a Python buffer object
+struct buffer_info {
+ void *ptr = nullptr; // Pointer to the underlying storage
+ ssize_t itemsize = 0; // Size of individual items in bytes
+ ssize_t size = 0; // Total number of entries
+ std::string format; // For homogeneous buffers, this should be set to format_descriptor<T>::format()
+ ssize_t ndim = 0; // Number of dimensions
+ std::vector<ssize_t> shape; // Shape of the tensor (1 entry per dimension)
+    std::vector<ssize_t> strides; // Number of bytes between adjacent entries (one entry per dimension)
+ bool readonly = false; // flag to indicate if the underlying storage may be written to
+
+ buffer_info() = default;
+
+ buffer_info(void *ptr, ssize_t itemsize, const std::string &format, ssize_t ndim,
+ detail::any_container<ssize_t> shape_in, detail::any_container<ssize_t> strides_in, bool readonly=false)
+ : ptr(ptr), itemsize(itemsize), size(1), format(format), ndim(ndim),
+ shape(std::move(shape_in)), strides(std::move(strides_in)), readonly(readonly) {
+ if (ndim != (ssize_t) shape.size() || ndim != (ssize_t) strides.size())
+ pybind11_fail("buffer_info: ndim doesn't match shape and/or strides length");
+ for (size_t i = 0; i < (size_t) ndim; ++i)
+ size *= shape[i];
+ }
+
+ template <typename T>
+ buffer_info(T *ptr, detail::any_container<ssize_t> shape_in, detail::any_container<ssize_t> strides_in, bool readonly=false)
+ : buffer_info(private_ctr_tag(), ptr, sizeof(T), format_descriptor<T>::format(), static_cast<ssize_t>(shape_in->size()), std::move(shape_in), std::move(strides_in), readonly) { }
+
+ buffer_info(void *ptr, ssize_t itemsize, const std::string &format, ssize_t size, bool readonly=false)
+ : buffer_info(ptr, itemsize, format, 1, {size}, {itemsize}, readonly) { }
+
+ template <typename T>
+ buffer_info(T *ptr, ssize_t size, bool readonly=false)
+ : buffer_info(ptr, sizeof(T), format_descriptor<T>::format(), size, readonly) { }
+
+ template <typename T>
+ buffer_info(const T *ptr, ssize_t size, bool readonly=true)
+ : buffer_info(const_cast<T*>(ptr), sizeof(T), format_descriptor<T>::format(), size, readonly) { }
+
+ explicit buffer_info(Py_buffer *view, bool ownview = true)
+ : buffer_info(view->buf, view->itemsize, view->format, view->ndim,
+ {view->shape, view->shape + view->ndim},
+ /* Though buffer::request() requests PyBUF_STRIDES, ctypes objects
+ * ignore this flag and return a view with NULL strides.
+ * When strides are NULL, build them manually. */
+ view->strides
+ ? std::vector<ssize_t>(view->strides, view->strides + view->ndim)
+ : detail::c_strides({view->shape, view->shape + view->ndim}, view->itemsize),
+ view->readonly) {
+ this->m_view = view;
+ this->ownview = ownview;
+ }
+
+ buffer_info(const buffer_info &) = delete;
+ buffer_info& operator=(const buffer_info &) = delete;
+
+ buffer_info(buffer_info &&other) {
+ (*this) = std::move(other);
+ }
+
+ buffer_info& operator=(buffer_info &&rhs) {
+ ptr = rhs.ptr;
+ itemsize = rhs.itemsize;
+ size = rhs.size;
+ format = std::move(rhs.format);
+ ndim = rhs.ndim;
+ shape = std::move(rhs.shape);
+ strides = std::move(rhs.strides);
+ std::swap(m_view, rhs.m_view);
+ std::swap(ownview, rhs.ownview);
+ readonly = rhs.readonly;
+ return *this;
+ }
+
+ ~buffer_info() {
+ if (m_view && ownview) { PyBuffer_Release(m_view); delete m_view; }
+ }
+
+ Py_buffer *view() const { return m_view; }
+ Py_buffer *&view() { return m_view; }
+private:
+ struct private_ctr_tag { };
+
+ buffer_info(private_ctr_tag, void *ptr, ssize_t itemsize, const std::string &format, ssize_t ndim,
+ detail::any_container<ssize_t> &&shape_in, detail::any_container<ssize_t> &&strides_in, bool readonly)
+ : buffer_info(ptr, itemsize, format, ndim, std::move(shape_in), std::move(strides_in), readonly) { }
+
+ Py_buffer *m_view = nullptr;
+ bool ownview = false;
+};
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+template <typename T, typename SFINAE = void> struct compare_buffer_info {
+ static bool compare(const buffer_info& b) {
+ return b.format == format_descriptor<T>::format() && b.itemsize == (ssize_t) sizeof(T);
+ }
+};
+
+template <typename T> struct compare_buffer_info<T, detail::enable_if_t<std::is_integral<T>::value>> {
+ static bool compare(const buffer_info& b) {
+ return (size_t) b.itemsize == sizeof(T) && (b.format == format_descriptor<T>::value ||
+ ((sizeof(T) == sizeof(long)) && b.format == (std::is_unsigned<T>::value ? "L" : "l")) ||
+ ((sizeof(T) == sizeof(size_t)) && b.format == (std::is_unsigned<T>::value ? "N" : "n")));
+ }
+};
+
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+/*
+ pybind11/cast.h: Partial template specializations to cast between
+ C++ and Python types
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "pytypes.h"
+#include "detail/typeid.h"
+#include "detail/descr.h"
+#include "detail/internals.h"
+#include <array>
+#include <limits>
+#include <tuple>
+#include <type_traits>
+
+#if defined(PYBIND11_CPP17)
+# if defined(__has_include)
+# if __has_include(<string_view>)
+# define PYBIND11_HAS_STRING_VIEW
+# endif
+# elif defined(_MSC_VER)
+# define PYBIND11_HAS_STRING_VIEW
+# endif
+#endif
+#ifdef PYBIND11_HAS_STRING_VIEW
+#include <string_view>
+#endif
+
+#if defined(__cpp_lib_char8_t) && __cpp_lib_char8_t >= 201811L
+# define PYBIND11_HAS_U8STRING
+#endif
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+/// A life support system for temporary objects created by `type_caster::load()`.
+/// Adding a patient will keep it alive up until the enclosing function returns.
+class loader_life_support {
+public:
+ /// A new patient frame is created when a function is entered
+ loader_life_support() {
+ get_internals().loader_patient_stack.push_back(nullptr);
+ }
+
+ /// ... and destroyed after it returns
+ ~loader_life_support() {
+ auto &stack = get_internals().loader_patient_stack;
+ if (stack.empty())
+ pybind11_fail("loader_life_support: internal error");
+
+ auto ptr = stack.back();
+ stack.pop_back();
+ Py_CLEAR(ptr);
+
+ // A heuristic to reduce the stack's capacity (e.g. after long recursive calls)
+ if (stack.capacity() > 16 && !stack.empty() && stack.capacity() / stack.size() > 2)
+ stack.shrink_to_fit();
+ }
+
+ /// This can only be used inside a pybind11-bound function, either by `argument_loader`
+ /// at argument preparation time or by `py::cast()` at execution time.
+ PYBIND11_NOINLINE static void add_patient(handle h) {
+ auto &stack = get_internals().loader_patient_stack;
+ if (stack.empty())
+ throw cast_error("When called outside a bound function, py::cast() cannot "
+ "do Python -> C++ conversions which require the creation "
+ "of temporary values");
+
+ auto &list_ptr = stack.back();
+ if (list_ptr == nullptr) {
+ list_ptr = PyList_New(1);
+ if (!list_ptr)
+ pybind11_fail("loader_life_support: error allocating list");
+ PyList_SET_ITEM(list_ptr, 0, h.inc_ref().ptr());
+ } else {
+ auto result = PyList_Append(list_ptr, h.ptr());
+ if (result == -1)
+ pybind11_fail("loader_life_support: error adding patient");
+ }
+ }
+};
+
+// Gets the cache entry for the given type, creating it if necessary. The return value is the pair
+// returned by emplace, i.e. an iterator for the entry and a bool set to `true` if the entry was
+// just created.
+inline std::pair<decltype(internals::registered_types_py)::iterator, bool> all_type_info_get_cache(PyTypeObject *type);
+
+// Populates a just-created cache entry.
+PYBIND11_NOINLINE inline void all_type_info_populate(PyTypeObject *t, std::vector<type_info *> &bases) {
+ std::vector<PyTypeObject *> check;
+ for (handle parent : reinterpret_borrow<tuple>(t->tp_bases))
+ check.push_back((PyTypeObject *) parent.ptr());
+
+ auto const &type_dict = get_internals().registered_types_py;
+ for (size_t i = 0; i < check.size(); i++) {
+ auto type = check[i];
+ // Ignore Python2 old-style class super type:
+ if (!PyType_Check((PyObject *) type)) continue;
+
+ // Check `type` in the current set of registered python types:
+ auto it = type_dict.find(type);
+ if (it != type_dict.end()) {
+ // We found a cache entry for it, so it's either pybind-registered or has pre-computed
+ // pybind bases, but we have to make sure we haven't already seen the type(s) before: we
+ // want to follow Python/virtual C++ rules that there should only be one instance of a
+ // common base.
+ for (auto *tinfo : it->second) {
+ // NB: Could use a second set here, rather than doing a linear search, but since
+ // having a large number of immediate pybind11-registered types seems fairly
+ // unlikely, that probably isn't worthwhile.
+ bool found = false;
+ for (auto *known : bases) {
+ if (known == tinfo) { found = true; break; }
+ }
+ if (!found) bases.push_back(tinfo);
+ }
+ }
+ else if (type->tp_bases) {
+            // It's some Python type, so keep following its base classes to look for one or more
+ // registered types
+ if (i + 1 == check.size()) {
+ // When we're at the end, we can pop off the current element to avoid growing
+ // `check` when adding just one base (which is typical--i.e. when there is no
+ // multiple inheritance)
+ check.pop_back();
+ i--;
+ }
+ for (handle parent : reinterpret_borrow<tuple>(type->tp_bases))
+ check.push_back((PyTypeObject *) parent.ptr());
+ }
+ }
+}
+
+/**
+ * Extracts vector of type_info pointers of pybind-registered roots of the given Python type. Will
+ * be just 1 pybind type for the Python type of a pybind-registered class, or for any Python-side
+ * derived class that uses single inheritance. Will contain as many types as required for a Python
+ * class that uses multiple inheritance to inherit (directly or indirectly) from multiple
+ * pybind-registered classes. Will be empty if neither the type nor any base classes are
+ * pybind-registered.
+ *
+ * The value is cached for the lifetime of the Python type.
+ */
+inline const std::vector<detail::type_info *> &all_type_info(PyTypeObject *type) {
+ auto ins = all_type_info_get_cache(type);
+ if (ins.second)
+ // New cache entry: populate it
+ all_type_info_populate(type, ins.first->second);
+
+ return ins.first->second;
+}
+
+/**
+ * Gets a single pybind11 type info for a python type. Returns nullptr if neither the type nor any
+ * ancestors are pybind11-registered. Throws an exception if there are multiple bases--use
+ * `all_type_info` instead if you want to support multiple bases.
+ */
+PYBIND11_NOINLINE inline detail::type_info* get_type_info(PyTypeObject *type) {
+ auto &bases = all_type_info(type);
+ if (bases.empty())
+ return nullptr;
+ if (bases.size() > 1)
+ pybind11_fail("pybind11::detail::get_type_info: type has multiple pybind11-registered bases");
+ return bases.front();
+}
+
+inline detail::type_info *get_local_type_info(const std::type_index &tp) {
+ auto &locals = registered_local_types_cpp();
+ auto it = locals.find(tp);
+ if (it != locals.end())
+ return it->second;
+ return nullptr;
+}
+
+inline detail::type_info *get_global_type_info(const std::type_index &tp) {
+ auto &types = get_internals().registered_types_cpp;
+ auto it = types.find(tp);
+ if (it != types.end())
+ return it->second;
+ return nullptr;
+}
+
+/// Return the type info for a given C++ type; on lookup failure can either throw or return nullptr.
+PYBIND11_NOINLINE inline detail::type_info *get_type_info(const std::type_index &tp,
+ bool throw_if_missing = false) {
+ if (auto ltype = get_local_type_info(tp))
+ return ltype;
+ if (auto gtype = get_global_type_info(tp))
+ return gtype;
+
+ if (throw_if_missing) {
+ std::string tname = tp.name();
+ detail::clean_type_id(tname);
+ pybind11_fail("pybind11::detail::get_type_info: unable to find type info for \"" + tname + "\"");
+ }
+ return nullptr;
+}
+
+PYBIND11_NOINLINE inline handle get_type_handle(const std::type_info &tp, bool throw_if_missing) {
+ detail::type_info *type_info = get_type_info(tp, throw_if_missing);
+ return handle(type_info ? ((PyObject *) type_info->type) : nullptr);
+}
+
+struct value_and_holder {
+ instance *inst = nullptr;
+ size_t index = 0u;
+ const detail::type_info *type = nullptr;
+ void **vh = nullptr;
+
+ // Main constructor for a found value/holder:
+ value_and_holder(instance *i, const detail::type_info *type, size_t vpos, size_t index) :
+ inst{i}, index{index}, type{type},
+ vh{inst->simple_layout ? inst->simple_value_holder : &inst->nonsimple.values_and_holders[vpos]}
+ {}
+
+ // Default constructor (used to signal a value-and-holder not found by get_value_and_holder())
+ value_and_holder() = default;
+
+ // Used for past-the-end iterator
+ value_and_holder(size_t index) : index{index} {}
+
+ template <typename V = void> V *&value_ptr() const {
+ return reinterpret_cast<V *&>(vh[0]);
+ }
+ // True if this `value_and_holder` has a non-null value pointer
+ explicit operator bool() const { return value_ptr(); }
+
+ template <typename H> H &holder() const {
+ return reinterpret_cast<H &>(vh[1]);
+ }
+ bool holder_constructed() const {
+ return inst->simple_layout
+ ? inst->simple_holder_constructed
+ : inst->nonsimple.status[index] & instance::status_holder_constructed;
+ }
+ void set_holder_constructed(bool v = true) {
+ if (inst->simple_layout)
+ inst->simple_holder_constructed = v;
+ else if (v)
+ inst->nonsimple.status[index] |= instance::status_holder_constructed;
+ else
+ inst->nonsimple.status[index] &= (uint8_t) ~instance::status_holder_constructed;
+ }
+ bool instance_registered() const {
+ return inst->simple_layout
+ ? inst->simple_instance_registered
+ : inst->nonsimple.status[index] & instance::status_instance_registered;
+ }
+ void set_instance_registered(bool v = true) {
+ if (inst->simple_layout)
+ inst->simple_instance_registered = v;
+ else if (v)
+ inst->nonsimple.status[index] |= instance::status_instance_registered;
+ else
+ inst->nonsimple.status[index] &= (uint8_t) ~instance::status_instance_registered;
+ }
+};
+
+// Container for accessing and iterating over an instance's values/holders
+struct values_and_holders {
+private:
+ instance *inst;
+ using type_vec = std::vector<detail::type_info *>;
+ const type_vec &tinfo;
+
+public:
+ values_and_holders(instance *inst) : inst{inst}, tinfo(all_type_info(Py_TYPE(inst))) {}
+
+ struct iterator {
+ private:
+ instance *inst = nullptr;
+ const type_vec *types = nullptr;
+ value_and_holder curr;
+ friend struct values_and_holders;
+ iterator(instance *inst, const type_vec *tinfo)
+ : inst{inst}, types{tinfo},
+ curr(inst /* instance */,
+ types->empty() ? nullptr : (*types)[0] /* type info */,
+ 0, /* vpos: (non-simple types only): the first vptr comes first */
+ 0 /* index */)
+ {}
+ // Past-the-end iterator:
+ iterator(size_t end) : curr(end) {}
+ public:
+ bool operator==(const iterator &other) const { return curr.index == other.curr.index; }
+ bool operator!=(const iterator &other) const { return curr.index != other.curr.index; }
+ iterator &operator++() {
+ if (!inst->simple_layout)
+ curr.vh += 1 + (*types)[curr.index]->holder_size_in_ptrs;
+ ++curr.index;
+ curr.type = curr.index < types->size() ? (*types)[curr.index] : nullptr;
+ return *this;
+ }
+ value_and_holder &operator*() { return curr; }
+ value_and_holder *operator->() { return &curr; }
+ };
+
+ iterator begin() { return iterator(inst, &tinfo); }
+ iterator end() { return iterator(tinfo.size()); }
+
+ iterator find(const type_info *find_type) {
+ auto it = begin(), endit = end();
+ while (it != endit && it->type != find_type) ++it;
+ return it;
+ }
+
+ size_t size() { return tinfo.size(); }
+};
+
+/**
+ * Extracts C++ value and holder pointer references from an instance (which may contain multiple
+ * values/holders for python-side multiple inheritance) that match the given type. Throws an error
+ * if the given type (or ValueType, if omitted) is not a pybind11 base of the given instance. If
+ * `find_type` is omitted (or explicitly specified as nullptr) the first value/holder are returned,
+ * regardless of type (and the resulting .type will be nullptr).
+ *
+ * The returned object should be short-lived: in particular, it must not outlive the called-upon
+ * instance.
+ */
+PYBIND11_NOINLINE inline value_and_holder instance::get_value_and_holder(const type_info *find_type /*= nullptr default in common.h*/, bool throw_if_missing /*= true in common.h*/) {
+ // Optimize common case:
+ if (!find_type || Py_TYPE(this) == find_type->type)
+ return value_and_holder(this, find_type, 0, 0);
+
+ detail::values_and_holders vhs(this);
+ auto it = vhs.find(find_type);
+ if (it != vhs.end())
+ return *it;
+
+ if (!throw_if_missing)
+ return value_and_holder();
+
+#if defined(NDEBUG)
+ pybind11_fail("pybind11::detail::instance::get_value_and_holder: "
+ "type is not a pybind11 base of the given instance "
+ "(compile in debug mode for type details)");
+#else
+ pybind11_fail("pybind11::detail::instance::get_value_and_holder: `" +
+ get_fully_qualified_tp_name(find_type->type) + "' is not a pybind11 base of the given `" +
+ get_fully_qualified_tp_name(Py_TYPE(this)) + "' instance");
+#endif
+}
+
+PYBIND11_NOINLINE inline void instance::allocate_layout() {
+ auto &tinfo = all_type_info(Py_TYPE(this));
+
+ const size_t n_types = tinfo.size();
+
+ if (n_types == 0)
+ pybind11_fail("instance allocation failed: new instance has no pybind11-registered base types");
+
+ simple_layout =
+ n_types == 1 && tinfo.front()->holder_size_in_ptrs <= instance_simple_holder_in_ptrs();
+
+ // Simple path: no python-side multiple inheritance, and a small-enough holder
+ if (simple_layout) {
+ simple_value_holder[0] = nullptr;
+ simple_holder_constructed = false;
+ simple_instance_registered = false;
+ }
+ else { // multiple base types or a too-large holder
+ // Allocate space to hold: [v1*][h1][v2*][h2]...[bb...] where [vN*] is a value pointer,
+ // [hN] is the (uninitialized) holder instance for value N, and [bb...] is a set of bool
+ // values that tracks whether each associated holder has been initialized. Each [block] is
+ // padded, if necessary, to an integer multiple of sizeof(void *).
+ size_t space = 0;
+ for (auto t : tinfo) {
+ space += 1; // value pointer
+ space += t->holder_size_in_ptrs; // holder instance
+ }
+ size_t flags_at = space;
+ space += size_in_ptrs(n_types); // status bytes (holder_constructed and instance_registered)
+
+ // Allocate space for flags, values, and holders, and initialize it to 0 (flags and values,
+ // in particular, need to be 0). Use Python's memory allocation functions: in Python 3.6
+ // they default to using pymalloc, which is designed to be efficient for small allocations
+ // like the one we're doing here; in earlier versions (and for larger allocations) they are
+ // just wrappers around malloc.
+#if PY_VERSION_HEX >= 0x03050000
+ nonsimple.values_and_holders = (void **) PyMem_Calloc(space, sizeof(void *));
+ if (!nonsimple.values_and_holders) throw std::bad_alloc();
+#else
+ nonsimple.values_and_holders = (void **) PyMem_New(void *, space);
+ if (!nonsimple.values_and_holders) throw std::bad_alloc();
+ std::memset(nonsimple.values_and_holders, 0, space * sizeof(void *));
+#endif
+ nonsimple.status = reinterpret_cast<uint8_t *>(&nonsimple.values_and_holders[flags_at]);
+ }
+ owned = true;
+}
+
+PYBIND11_NOINLINE inline void instance::deallocate_layout() {
+ if (!simple_layout)
+ PyMem_Free(nonsimple.values_and_holders);
+}
+
+PYBIND11_NOINLINE inline bool isinstance_generic(handle obj, const std::type_info &tp) {
+ handle type = detail::get_type_handle(tp, false);
+ if (!type)
+ return false;
+ return isinstance(obj, type);
+}
+
+PYBIND11_NOINLINE inline std::string error_string() {
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown internal error occurred");
+ return "Unknown internal error occurred";
+ }
+
+ error_scope scope; // Preserve error state
+
+ std::string errorString;
+ if (scope.type) {
+ errorString += handle(scope.type).attr("__name__").cast<std::string>();
+ errorString += ": ";
+ }
+ if (scope.value)
+ errorString += (std::string) str(scope.value);
+
+ PyErr_NormalizeException(&scope.type, &scope.value, &scope.trace);
+
+#if PY_MAJOR_VERSION >= 3
+ if (scope.trace != nullptr)
+ PyException_SetTraceback(scope.value, scope.trace);
+#endif
+
+#if !defined(PYPY_VERSION)
+ if (scope.trace) {
+ auto *trace = (PyTracebackObject *) scope.trace;
+
+ /* Get the deepest trace possible */
+ while (trace->tb_next)
+ trace = trace->tb_next;
+
+ PyFrameObject *frame = trace->tb_frame;
+ errorString += "\n\nAt:\n";
+ while (frame) {
+ int lineno = PyFrame_GetLineNumber(frame);
+ errorString +=
+ " " + handle(frame->f_code->co_filename).cast<std::string>() +
+ "(" + std::to_string(lineno) + "): " +
+ handle(frame->f_code->co_name).cast<std::string>() + "\n";
+ frame = frame->f_back;
+ }
+ }
+#endif
+
+ return errorString;
+}
+
+PYBIND11_NOINLINE inline handle get_object_handle(const void *ptr, const detail::type_info *type) {
+ auto &instances = get_internals().registered_instances;
+ auto range = instances.equal_range(ptr);
+ for (auto it = range.first; it != range.second; ++it) {
+ for (const auto &vh : values_and_holders(it->second)) {
+ if (vh.type == type)
+ return handle((PyObject *) it->second);
+ }
+ }
+ return handle();
+}
+
+inline PyThreadState *get_thread_state_unchecked() {
+#if defined(PYPY_VERSION)
+ return PyThreadState_GET();
+#elif PY_VERSION_HEX < 0x03000000
+ return _PyThreadState_Current;
+#elif PY_VERSION_HEX < 0x03050000
+ return (PyThreadState*) _Py_atomic_load_relaxed(&_PyThreadState_Current);
+#elif PY_VERSION_HEX < 0x03050200
+ return (PyThreadState*) _PyThreadState_Current.value;
+#else
+ return _PyThreadState_UncheckedGet();
+#endif
+}
+
+// Forward declarations
+inline void keep_alive_impl(handle nurse, handle patient);
+inline PyObject *make_new_instance(PyTypeObject *type);
+
+class type_caster_generic {
+public:
+ PYBIND11_NOINLINE type_caster_generic(const std::type_info &type_info)
+ : typeinfo(get_type_info(type_info)), cpptype(&type_info) { }
+
+ type_caster_generic(const type_info *typeinfo)
+ : typeinfo(typeinfo), cpptype(typeinfo ? typeinfo->cpptype : nullptr) { }
+
+ bool load(handle src, bool convert) {
+ return load_impl<type_caster_generic>(src, convert);
+ }
+
+ PYBIND11_NOINLINE static handle cast(const void *_src, return_value_policy policy, handle parent,
+ const detail::type_info *tinfo,
+ void *(*copy_constructor)(const void *),
+ void *(*move_constructor)(const void *),
+ const void *existing_holder = nullptr) {
+ if (!tinfo) // no type info: error will be set already
+ return handle();
+
+ void *src = const_cast<void *>(_src);
+ if (src == nullptr)
+ return none().release();
+
+ auto it_instances = get_internals().registered_instances.equal_range(src);
+ for (auto it_i = it_instances.first; it_i != it_instances.second; ++it_i) {
+ for (auto instance_type : detail::all_type_info(Py_TYPE(it_i->second))) {
+ if (instance_type && same_type(*instance_type->cpptype, *tinfo->cpptype))
+ return handle((PyObject *) it_i->second).inc_ref();
+ }
+ }
+
+ auto inst = reinterpret_steal<object>(make_new_instance(tinfo->type));
+ auto wrapper = reinterpret_cast<instance *>(inst.ptr());
+ wrapper->owned = false;
+ void *&valueptr = values_and_holders(wrapper).begin()->value_ptr();
+
+ switch (policy) {
+ case return_value_policy::automatic:
+ case return_value_policy::take_ownership:
+ valueptr = src;
+ wrapper->owned = true;
+ break;
+
+ case return_value_policy::automatic_reference:
+ case return_value_policy::reference:
+ valueptr = src;
+ wrapper->owned = false;
+ break;
+
+ case return_value_policy::copy:
+ if (copy_constructor)
+ valueptr = copy_constructor(src);
+ else {
+#if defined(NDEBUG)
+ throw cast_error("return_value_policy = copy, but type is "
+ "non-copyable! (compile in debug mode for details)");
+#else
+ std::string type_name(tinfo->cpptype->name());
+ detail::clean_type_id(type_name);
+ throw cast_error("return_value_policy = copy, but type " +
+ type_name + " is non-copyable!");
+#endif
+ }
+ wrapper->owned = true;
+ break;
+
+ case return_value_policy::move:
+ if (move_constructor)
+ valueptr = move_constructor(src);
+ else if (copy_constructor)
+ valueptr = copy_constructor(src);
+ else {
+#if defined(NDEBUG)
+ throw cast_error("return_value_policy = move, but type is neither "
+ "movable nor copyable! "
+ "(compile in debug mode for details)");
+#else
+ std::string type_name(tinfo->cpptype->name());
+ detail::clean_type_id(type_name);
+ throw cast_error("return_value_policy = move, but type " +
+ type_name + " is neither movable nor copyable!");
+#endif
+ }
+ wrapper->owned = true;
+ break;
+
+ case return_value_policy::reference_internal:
+ valueptr = src;
+ wrapper->owned = false;
+ keep_alive_impl(inst, parent);
+ break;
+
+ default:
+ throw cast_error("unhandled return_value_policy: should not happen!");
+ }
+
+ tinfo->init_instance(wrapper, existing_holder);
+
+ return inst.release();
+ }
+
+ // Base methods for generic caster; these are overridden in copyable_holder_caster
+ void load_value(value_and_holder &&v_h) {
+ auto *&vptr = v_h.value_ptr();
+ // Lazy allocation for unallocated values:
+ if (vptr == nullptr) {
+ auto *type = v_h.type ? v_h.type : typeinfo;
+ if (type->operator_new) {
+ vptr = type->operator_new(type->type_size);
+ } else {
+ #if defined(__cpp_aligned_new) && (!defined(_MSC_VER) || _MSC_VER >= 1912)
+ if (type->type_align > __STDCPP_DEFAULT_NEW_ALIGNMENT__)
+ vptr = ::operator new(type->type_size,
+ std::align_val_t(type->type_align));
+ else
+ #endif
+ vptr = ::operator new(type->type_size);
+ }
+ }
+ value = vptr;
+ }
+ bool try_implicit_casts(handle src, bool convert) {
+ for (auto &cast : typeinfo->implicit_casts) {
+ type_caster_generic sub_caster(*cast.first);
+ if (sub_caster.load(src, convert)) {
+ value = cast.second(sub_caster.value);
+ return true;
+ }
+ }
+ return false;
+ }
+ bool try_direct_conversions(handle src) {
+ for (auto &converter : *typeinfo->direct_conversions) {
+ if (converter(src.ptr(), value))
+ return true;
+ }
+ return false;
+ }
+ void check_holder_compat() {}
+
+ PYBIND11_NOINLINE static void *local_load(PyObject *src, const type_info *ti) {
+ auto caster = type_caster_generic(ti);
+ if (caster.load(src, false))
+ return caster.value;
+ return nullptr;
+ }
+
+ /// Try to load with foreign typeinfo, if available. Used when there is no
+ /// native typeinfo, or when the native one wasn't able to produce a value.
+ PYBIND11_NOINLINE bool try_load_foreign_module_local(handle src) {
+ constexpr auto *local_key = PYBIND11_MODULE_LOCAL_ID;
+ const auto pytype = type::handle_of(src);
+ if (!hasattr(pytype, local_key))
+ return false;
+
+ type_info *foreign_typeinfo = reinterpret_borrow<capsule>(getattr(pytype, local_key));
+ // Only consider this foreign loader if it is actually foreign and is a loader of the correct C++ type
+ if (foreign_typeinfo->module_local_load == &local_load
+ || (cpptype && !same_type(*cpptype, *foreign_typeinfo->cpptype)))
+ return false;
+
+ if (auto result = foreign_typeinfo->module_local_load(src.ptr(), foreign_typeinfo)) {
+ value = result;
+ return true;
+ }
+ return false;
+ }
+
+ // Implementation of `load`; this takes the type of `this` so that it can dispatch the relevant
+ // bits of code between here and copyable_holder_caster where the two classes need different
+ // logic (without having to resort to virtual inheritance).
+ template <typename ThisT>
+ PYBIND11_NOINLINE bool load_impl(handle src, bool convert) {
+ if (!src) return false;
+ if (!typeinfo) return try_load_foreign_module_local(src);
+ if (src.is_none()) {
+ // Defer accepting None to other overloads (if we aren't in convert mode):
+ if (!convert) return false;
+ value = nullptr;
+ return true;
+ }
+
+ auto &this_ = static_cast<ThisT &>(*this);
+ this_.check_holder_compat();
+
+ PyTypeObject *srctype = Py_TYPE(src.ptr());
+
+ // Case 1: If src is an exact type match for the target type then we can reinterpret_cast
+ // the instance's value pointer to the target type:
+ if (srctype == typeinfo->type) {
+ this_.load_value(reinterpret_cast<instance *>(src.ptr())->get_value_and_holder());
+ return true;
+ }
+ // Case 2: We have a derived class
+ else if (PyType_IsSubtype(srctype, typeinfo->type)) {
+ auto &bases = all_type_info(srctype);
+ bool no_cpp_mi = typeinfo->simple_type;
+
+ // Case 2a: the python type is a Python-inherited derived class that inherits from just
+ // one simple (no MI) pybind11 class, or is an exact match, so the C++ instance is of
+ // the right type and we can use reinterpret_cast.
+ // (This is essentially the same as case 2b, but because not using multiple inheritance
+ // is extremely common, we handle it specially to avoid the loop iterator and type
+ // pointer lookup overhead)
+ if (bases.size() == 1 && (no_cpp_mi || bases.front()->type == typeinfo->type)) {
+ this_.load_value(reinterpret_cast<instance *>(src.ptr())->get_value_and_holder());
+ return true;
+ }
+ // Case 2b: the python type inherits from multiple C++ bases. Check the bases to see if
+ // we can find an exact match (or, for a simple C++ type, an inherited match); if so, we
+ // can safely reinterpret_cast to the relevant pointer.
+ else if (bases.size() > 1) {
+ for (auto base : bases) {
+ if (no_cpp_mi ? PyType_IsSubtype(base->type, typeinfo->type) : base->type == typeinfo->type) {
+ this_.load_value(reinterpret_cast<instance *>(src.ptr())->get_value_and_holder(base));
+ return true;
+ }
+ }
+ }
+
+ // Case 2c: C++ multiple inheritance is involved and we couldn't find an exact type match
+ // in the registered bases, above, so try implicit casting (needed for proper C++ casting
+ // when MI is involved).
+ if (this_.try_implicit_casts(src, convert))
+ return true;
+ }
+
+ // Perform an implicit conversion
+ if (convert) {
+ for (auto &converter : typeinfo->implicit_conversions) {
+ auto temp = reinterpret_steal<object>(converter(src.ptr(), typeinfo->type));
+ if (load_impl<ThisT>(temp, false)) {
+ loader_life_support::add_patient(temp);
+ return true;
+ }
+ }
+ if (this_.try_direct_conversions(src))
+ return true;
+ }
+
+ // Failed to match local typeinfo. Try again with global.
+ if (typeinfo->module_local) {
+ if (auto gtype = get_global_type_info(*typeinfo->cpptype)) {
+ typeinfo = gtype;
+ return load(src, false);
+ }
+ }
+
+ // Global typeinfo has precedence over foreign module_local
+ return try_load_foreign_module_local(src);
+ }
+
+
+ // Called to do type lookup and wrap the pointer and type in a pair when a dynamic_cast
+ // isn't needed or can't be used. If the type is unknown, sets the error and returns a pair
+ // with .second = nullptr. (p.first = nullptr is not an error: it becomes None).
+ PYBIND11_NOINLINE static std::pair<const void *, const type_info *> src_and_type(
+ const void *src, const std::type_info &cast_type, const std::type_info *rtti_type = nullptr) {
+ if (auto *tpi = get_type_info(cast_type))
+ return {src, const_cast<const type_info *>(tpi)};
+
+ // Not found, set error:
+ std::string tname = rtti_type ? rtti_type->name() : cast_type.name();
+ detail::clean_type_id(tname);
+ std::string msg = "Unregistered type : " + tname;
+ PyErr_SetString(PyExc_TypeError, msg.c_str());
+ return {nullptr, nullptr};
+ }
+
+ const type_info *typeinfo = nullptr;
+ const std::type_info *cpptype = nullptr;
+ void *value = nullptr;
+};
+
+/**
+ * Determine suitable casting operator for pointer-or-lvalue-casting type casters. The type caster
+ * needs to provide `operator T*()` and `operator T&()` operators.
+ *
+ * If the type supports moving the value away via an `operator T&&() &&` method, it should use
+ * `movable_cast_op_type` instead.
+ */
+template <typename T>
+using cast_op_type =
+ conditional_t<std::is_pointer<remove_reference_t<T>>::value,
+ typename std::add_pointer<intrinsic_t<T>>::type,
+ typename std::add_lvalue_reference<intrinsic_t<T>>::type>;
+
+/**
+ * Determine suitable casting operator for a type caster with a movable value. Such a type caster
+ * needs to provide `operator T*()`, `operator T&()`, and `operator T&&() &&`. The latter will be
+ * called in appropriate contexts where the value can be moved rather than copied.
+ *
+ * These operators are automatically provided when using the PYBIND11_TYPE_CASTER macro.
+ */
+template <typename T>
+using movable_cast_op_type =
+ conditional_t<std::is_pointer<typename std::remove_reference<T>::type>::value,
+ typename std::add_pointer<intrinsic_t<T>>::type,
+ conditional_t<std::is_rvalue_reference<T>::value,
+ typename std::add_rvalue_reference<intrinsic_t<T>>::type,
+ typename std::add_lvalue_reference<intrinsic_t<T>>::type>>;
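+
+// For illustration, the aliases above resolve as follows:
+//   cast_op_type<int &>          -> int &
+//   cast_op_type<int *>          -> int *
+//   movable_cast_op_type<int &&> -> int &&
+// i.e. pointers are passed through unchanged and everything else becomes a reference.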
+
+// std::is_copy_constructible isn't quite enough: it lets std::vector<T> (and similar) through when
+// T is non-copyable, but code containing such a copy constructor fails to actually compile.
+template <typename T, typename SFINAE = void> struct is_copy_constructible : std::is_copy_constructible<T> {};
+
+// Specialization for types that appear to be copy constructible but also look like stl containers
+// (we specifically check for: has `value_type` and `reference` with `reference = value_type&`): if
+// so, copy constructability depends on whether the value_type is copy constructible.
+template <typename Container> struct is_copy_constructible<Container, enable_if_t<all_of<
+ std::is_copy_constructible<Container>,
+ std::is_same<typename Container::value_type &, typename Container::reference>,
+ // Avoid infinite recursion
+ negation<std::is_same<Container, typename Container::value_type>>
+ >::value>> : is_copy_constructible<typename Container::value_type> {};
+
+// Likewise for std::pair
+// (after C++17 it is mandatory that the copy constructor not exist when the two types aren't themselves
+// copy constructible, but this cannot be relied upon when T1 or T2 are themselves containers).
+template <typename T1, typename T2> struct is_copy_constructible<std::pair<T1, T2>>
+ : all_of<is_copy_constructible<T1>, is_copy_constructible<T2>> {};
+
+// The same problems arise with std::is_copy_assignable, so we use the same workaround.
+template <typename T, typename SFINAE = void> struct is_copy_assignable : std::is_copy_assignable<T> {};
+template <typename Container> struct is_copy_assignable<Container, enable_if_t<all_of<
+ std::is_copy_assignable<Container>,
+ std::is_same<typename Container::value_type &, typename Container::reference>
+ >::value>> : is_copy_assignable<typename Container::value_type> {};
+template <typename T1, typename T2> struct is_copy_assignable<std::pair<T1, T2>>
+ : all_of<is_copy_assignable<T1>, is_copy_assignable<T2>> {};
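+
+// A minimal illustration of the problem the traits above work around: std::vector declares its
+// copy constructor unconditionally, so the standard trait reports "copyable" even when the
+// element type is not, and actually copying such a vector then fails to compile.
+//
+//     struct MoveOnly {
+//         MoveOnly() = default;
+//         MoveOnly(MoveOnly &&) = default;
+//         MoveOnly(const MoveOnly &) = delete;
+//     };
+//     static_assert(std::is_copy_constructible<std::vector<MoveOnly>>::value,
+//                   "standard trait: looks copy constructible");
+//     static_assert(!pybind11::detail::is_copy_constructible<std::vector<MoveOnly>>::value,
+//                   "pybind11 trait: correctly reports non-copyable");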
+
+PYBIND11_NAMESPACE_END(detail)
+
+// polymorphic_type_hook<itype>::get(src, tinfo) determines whether the object pointed
+// to by `src` actually is an instance of some class derived from `itype`.
+// If so, it sets `tinfo` to point to the std::type_info representing that derived
+// type, and returns a pointer to the start of the most-derived object of that type
+// (in which `src` is a subobject; this will be the same address as `src` in most
+// single inheritance cases). If not, or if `src` is nullptr, it simply returns `src`
+// and leaves `tinfo` at its default value of nullptr.
+//
+// The default polymorphic_type_hook just returns src. A specialization for polymorphic
+// types determines the runtime type of the passed object and adjusts the this-pointer
+// appropriately via dynamic_cast<void*>. This is what enables a C++ Animal* to appear
+// to Python as a Dog (if Dog inherits from Animal, Animal is polymorphic, Dog is
+// registered with pybind11, and this Animal is in fact a Dog).
+//
+// You may specialize polymorphic_type_hook yourself for types that want to appear
+// polymorphic to Python but do not use C++ RTTI. (This is a not uncommon pattern
+// in performance-sensitive applications, used most notably in LLVM.)
+//
+// polymorphic_type_hook_base allows users to specialize polymorphic_type_hook with
+// std::enable_if. User provided specializations will always have higher priority than
+// the default implementation and specialization provided in polymorphic_type_hook_base.
+template <typename itype, typename SFINAE = void>
+struct polymorphic_type_hook_base
+{
+ static const void *get(const itype *src, const std::type_info*&) { return src; }
+};
+template <typename itype>
+struct polymorphic_type_hook_base<itype, detail::enable_if_t<std::is_polymorphic<itype>::value>>
+{
+ static const void *get(const itype *src, const std::type_info*& type) {
+ type = src ? &typeid(*src) : nullptr;
+ return dynamic_cast<const void*>(src);
+ }
+};
+template <typename itype, typename SFINAE = void>
+struct polymorphic_type_hook : public polymorphic_type_hook_base<itype> {};
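+
+// A minimal sketch of such a user-provided specialization; the types `Pet` and `Dog` are
+// hypothetical and only illustrate the shape of the hook for a non-RTTI hierarchy:
+//
+//     struct Pet { enum class Kind { Dog } kind; };          // no virtual functions
+//     struct Dog : Pet { Dog() { kind = Kind::Dog; } };
+//
+//     namespace pybind11 {
+//     template <> struct polymorphic_type_hook<Pet> {
+//         static const void *get(const Pet *src, const std::type_info *&type) {
+//             if (src && src->kind == Pet::Kind::Dog) {
+//                 type = &typeid(Dog);
+//                 return static_cast<const Dog *>(src);
+//             }
+//             return src; // unknown kind: leave `type` at nullptr
+//         }
+//     };
+//     } // namespace pybind11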
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+/// Generic type caster for objects stored on the heap
+template <typename type> class type_caster_base : public type_caster_generic {
+ using itype = intrinsic_t<type>;
+
+public:
+ static constexpr auto name = _<type>();
+
+ type_caster_base() : type_caster_base(typeid(type)) { }
+ explicit type_caster_base(const std::type_info &info) : type_caster_generic(info) { }
+
+ static handle cast(const itype &src, return_value_policy policy, handle parent) {
+ if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference)
+ policy = return_value_policy::copy;
+ return cast(&src, policy, parent);
+ }
+
+ static handle cast(itype &&src, return_value_policy, handle parent) {
+ return cast(&src, return_value_policy::move, parent);
+ }
+
+ // Returns a (pointer, type_info) pair taking care of necessary type lookup for a
+ // polymorphic type (using RTTI by default, but can be overridden by specializing
+ // polymorphic_type_hook). If the instance isn't derived, returns the base version.
+ static std::pair<const void *, const type_info *> src_and_type(const itype *src) {
+ auto &cast_type = typeid(itype);
+ const std::type_info *instance_type = nullptr;
+ const void *vsrc = polymorphic_type_hook<itype>::get(src, instance_type);
+ if (instance_type && !same_type(cast_type, *instance_type)) {
+ // This is a base pointer to a derived type. If the derived type is registered
+ // with pybind11, we want to make the full derived object available.
+ // In the typical case where itype is polymorphic, we get the correct
+ // derived pointer (which may be != base pointer) by a dynamic_cast to
+ // most derived type. If itype is not polymorphic, we won't get here
+ // except via a user-provided specialization of polymorphic_type_hook,
+ // and the user has promised that no this-pointer adjustment is
+ // required in that case, so it's OK to use static_cast.
+ if (const auto *tpi = get_type_info(*instance_type))
+ return {vsrc, tpi};
+ }
+ // Otherwise we have either a nullptr, an `itype` pointer, or an unknown derived pointer, so
+ // don't do a cast
+ return type_caster_generic::src_and_type(src, cast_type, instance_type);
+ }
+
+ static handle cast(const itype *src, return_value_policy policy, handle parent) {
+ auto st = src_and_type(src);
+ return type_caster_generic::cast(
+ st.first, policy, parent, st.second,
+ make_copy_constructor(src), make_move_constructor(src));
+ }
+
+ static handle cast_holder(const itype *src, const void *holder) {
+ auto st = src_and_type(src);
+ return type_caster_generic::cast(
+ st.first, return_value_policy::take_ownership, {}, st.second,
+ nullptr, nullptr, holder);
+ }
+
+ template <typename T> using cast_op_type = detail::cast_op_type<T>;
+
+ operator itype*() { return (type *) value; }
+ operator itype&() { if (!value) throw reference_cast_error(); return *((itype *) value); }
+
+protected:
+ using Constructor = void *(*)(const void *);
+
+ /* Only enabled when the types are {copy,move}-constructible *and* when the type
+ does not have a private operator new implementation. */
+ template <typename T, typename = enable_if_t<is_copy_constructible<T>::value>>
+ static auto make_copy_constructor(const T *x) -> decltype(new T(*x), Constructor{}) {
+ return [](const void *arg) -> void * {
+ return new T(*reinterpret_cast<const T *>(arg));
+ };
+ }
+
+ template <typename T, typename = enable_if_t<std::is_move_constructible<T>::value>>
+ static auto make_move_constructor(const T *x) -> decltype(new T(std::move(*const_cast<T *>(x))), Constructor{}) {
+ return [](const void *arg) -> void * {
+ return new T(std::move(*const_cast<T *>(reinterpret_cast<const T *>(arg))));
+ };
+ }
+
+ static Constructor make_copy_constructor(...) { return nullptr; }
+ static Constructor make_move_constructor(...) { return nullptr; }
+};
+
+template <typename type, typename SFINAE = void> class type_caster : public type_caster_base<type> { };
+template <typename type> using make_caster = type_caster<intrinsic_t<type>>;
+
+// Shortcut for calling a caster's `cast_op_type` cast operator for casting a type_caster to a T
+template <typename T> typename make_caster<T>::template cast_op_type<T> cast_op(make_caster<T> &caster) {
+ return caster.operator typename make_caster<T>::template cast_op_type<T>();
+}
+template <typename T> typename make_caster<T>::template cast_op_type<typename std::add_rvalue_reference<T>::type>
+cast_op(make_caster<T> &&caster) {
+ return std::move(caster).operator
+ typename make_caster<T>::template cast_op_type<typename std::add_rvalue_reference<T>::type>();
+}
+
+template <typename type> class type_caster<std::reference_wrapper<type>> {
+private:
+ using caster_t = make_caster<type>;
+ caster_t subcaster;
+ using subcaster_cast_op_type = typename caster_t::template cast_op_type<type>;
+ static_assert(std::is_same<typename std::remove_const<type>::type &, subcaster_cast_op_type>::value,
+ "std::reference_wrapper<T> caster requires T to have a caster with a `T &` operator");
+public:
+ bool load(handle src, bool convert) { return subcaster.load(src, convert); }
+ static constexpr auto name = caster_t::name;
+ static handle cast(const std::reference_wrapper<type> &src, return_value_policy policy, handle parent) {
+ // It is definitely wrong to take ownership of this pointer, so mask that rvp
+ if (policy == return_value_policy::take_ownership || policy == return_value_policy::automatic)
+ policy = return_value_policy::automatic_reference;
+ return caster_t::cast(&src.get(), policy, parent);
+ }
+ template <typename T> using cast_op_type = std::reference_wrapper<type>;
+ operator std::reference_wrapper<type>() { return subcaster.operator subcaster_cast_op_type&(); }
+};
+
+#define PYBIND11_TYPE_CASTER(type, py_name) \
+ protected: \
+ type value; \
+ public: \
+ static constexpr auto name = py_name; \
+ template <typename T_, enable_if_t<std::is_same<type, remove_cv_t<T_>>::value, int> = 0> \
+ static handle cast(T_ *src, return_value_policy policy, handle parent) { \
+ if (!src) return none().release(); \
+ if (policy == return_value_policy::take_ownership) { \
+ auto h = cast(std::move(*src), policy, parent); delete src; return h; \
+ } else { \
+ return cast(*src, policy, parent); \
+ } \
+ } \
+ operator type*() { return &value; } \
+ operator type&() { return value; } \
+ operator type&&() && { return std::move(value); } \
+ template <typename T_> using cast_op_type = pybind11::detail::movable_cast_op_type<T_>
+
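+// A minimal sketch of how PYBIND11_TYPE_CASTER is typically used in a custom caster. The
+// type `inty` is hypothetical and loosely follows the custom-caster example in the pybind11
+// documentation:
+//
+//     struct inty { long long v; };
+//
+//     namespace pybind11 { namespace detail {
+//     template <> struct type_caster<inty> {
+//     public:
+//         PYBIND11_TYPE_CASTER(inty, _("inty"));
+//
+//         // Python -> C++
+//         bool load(handle src, bool /* convert */) {
+//             PyObject *tmp = PyNumber_Long(src.ptr());
+//             if (!tmp) return false;
+//             value.v = PyLong_AsLongLong(tmp);
+//             Py_DECREF(tmp);
+//             bool ok = !(value.v == -1 && PyErr_Occurred());
+//             if (!ok) PyErr_Clear();
+//             return ok;
+//         }
+//
+//         // C++ -> Python
+//         static handle cast(inty src, return_value_policy /* policy */, handle /* parent */) {
+//             return PyLong_FromLongLong(src.v);
+//         }
+//     };
+//     }} // namespace pybind11::detail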
+
+template <typename CharT> using is_std_char_type = any_of<
+ std::is_same<CharT, char>, /* std::string */
+#if defined(PYBIND11_HAS_U8STRING)
+ std::is_same<CharT, char8_t>, /* std::u8string */
+#endif
+ std::is_same<CharT, char16_t>, /* std::u16string */
+ std::is_same<CharT, char32_t>, /* std::u32string */
+ std::is_same<CharT, wchar_t> /* std::wstring */
+>;
+
+
+template <typename T>
+struct type_caster<T, enable_if_t<std::is_arithmetic<T>::value && !is_std_char_type<T>::value>> {
+ using _py_type_0 = conditional_t<sizeof(T) <= sizeof(long), long, long long>;
+ using _py_type_1 = conditional_t<std::is_signed<T>::value, _py_type_0, typename std::make_unsigned<_py_type_0>::type>;
+ using py_type = conditional_t<std::is_floating_point<T>::value, double, _py_type_1>;
+public:
+
+ bool load(handle src, bool convert) {
+ py_type py_value;
+
+ if (!src)
+ return false;
+
+ if (std::is_floating_point<T>::value) {
+ if (convert || PyFloat_Check(src.ptr()))
+ py_value = (py_type) PyFloat_AsDouble(src.ptr());
+ else
+ return false;
+ } else if (PyFloat_Check(src.ptr())) {
+ return false;
+ } else if (std::is_unsigned<py_type>::value) {
+ py_value = as_unsigned<py_type>(src.ptr());
+ } else { // signed integer:
+ py_value = sizeof(T) <= sizeof(long)
+ ? (py_type) PyLong_AsLong(src.ptr())
+ : (py_type) PYBIND11_LONG_AS_LONGLONG(src.ptr());
+ }
+
+ // Python API reported an error
+ bool py_err = py_value == (py_type) -1 && PyErr_Occurred();
+
+ // Check to see if the conversion is valid (integers should match exactly)
+ // Signed/unsigned checks happen elsewhere
+ if (py_err || (std::is_integral<T>::value && sizeof(py_type) != sizeof(T) && py_value != (py_type) (T) py_value)) {
+ bool type_error = py_err && PyErr_ExceptionMatches(
+#if PY_VERSION_HEX < 0x03000000 && !defined(PYPY_VERSION)
+ PyExc_SystemError
+#else
+ PyExc_TypeError
+#endif
+ );
+ PyErr_Clear();
+ if (type_error && convert && PyNumber_Check(src.ptr())) {
+ auto tmp = reinterpret_steal<object>(std::is_floating_point<T>::value
+ ? PyNumber_Float(src.ptr())
+ : PyNumber_Long(src.ptr()));
+ PyErr_Clear();
+ return load(tmp, false);
+ }
+ return false;
+ }
+
+ value = (T) py_value;
+ return true;
+ }
+
+ template<typename U = T>
+ static typename std::enable_if<std::is_floating_point<U>::value, handle>::type
+ cast(U src, return_value_policy /* policy */, handle /* parent */) {
+ return PyFloat_FromDouble((double) src);
+ }
+
+ template<typename U = T>
+ static typename std::enable_if<!std::is_floating_point<U>::value && std::is_signed<U>::value && (sizeof(U) <= sizeof(long)), handle>::type
+ cast(U src, return_value_policy /* policy */, handle /* parent */) {
+ return PYBIND11_LONG_FROM_SIGNED((long) src);
+ }
+
+ template<typename U = T>
+ static typename std::enable_if<!std::is_floating_point<U>::value && std::is_unsigned<U>::value && (sizeof(U) <= sizeof(unsigned long)), handle>::type
+ cast(U src, return_value_policy /* policy */, handle /* parent */) {
+ return PYBIND11_LONG_FROM_UNSIGNED((unsigned long) src);
+ }
+
+ template<typename U = T>
+ static typename std::enable_if<!std::is_floating_point<U>::value && std::is_signed<U>::value && (sizeof(U) > sizeof(long)), handle>::type
+ cast(U src, return_value_policy /* policy */, handle /* parent */) {
+ return PyLong_FromLongLong((long long) src);
+ }
+
+ template<typename U = T>
+ static typename std::enable_if<!std::is_floating_point<U>::value && std::is_unsigned<U>::value && (sizeof(U) > sizeof(unsigned long)), handle>::type
+ cast(U src, return_value_policy /* policy */, handle /* parent */) {
+ return PyLong_FromUnsignedLongLong((unsigned long long) src);
+ }
+
+ PYBIND11_TYPE_CASTER(T, _<std::is_integral<T>::value>("int", "float"));
+};
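+
+// The `convert` flag consumed by load() above is what `py::arg(...).noconvert()` controls from
+// the binding side. A small sketch (module `m` and function name are hypothetical):
+//
+//     m.def("add", [](int a, int b) { return a + b; },
+//           py::arg("a").noconvert(), py::arg("b"));
+//     // add(1.5, 2) raises TypeError: "a" rejects the implicit float -> int conversion
+//     // add(1, 2.5) is fine: "b" still permits the conversion in the second dispatch pass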
+
+template<typename T> struct void_caster {
+public:
+ bool load(handle src, bool) {
+ if (src && src.is_none())
+ return true;
+ return false;
+ }
+ static handle cast(T, return_value_policy /* policy */, handle /* parent */) {
+ return none().inc_ref();
+ }
+ PYBIND11_TYPE_CASTER(T, _("None"));
+};
+
+template <> class type_caster<void_type> : public void_caster<void_type> {};
+
+template <> class type_caster<void> : public type_caster<void_type> {
+public:
+ using type_caster<void_type>::cast;
+
+ bool load(handle h, bool) {
+ if (!h) {
+ return false;
+ } else if (h.is_none()) {
+ value = nullptr;
+ return true;
+ }
+
+ /* Check if this is a capsule */
+ if (isinstance<capsule>(h)) {
+ value = reinterpret_borrow<capsule>(h);
+ return true;
+ }
+
+ /* Check if this is a C++ type */
+ auto &bases = all_type_info((PyTypeObject *) type::handle_of(h).ptr());
+ if (bases.size() == 1) { // Only allow loading from a single-value type
+ value = values_and_holders(reinterpret_cast<instance *>(h.ptr())).begin()->value_ptr();
+ return true;
+ }
+
+ /* Fail */
+ return false;
+ }
+
+ static handle cast(const void *ptr, return_value_policy /* policy */, handle /* parent */) {
+ if (ptr)
+ return capsule(ptr).release();
+ else
+ return none().inc_ref();
+ }
+
+ template <typename T> using cast_op_type = void*&;
+ operator void *&() { return value; }
+ static constexpr auto name = _("capsule");
+private:
+ void *value = nullptr;
+};
+
+template <> class type_caster<std::nullptr_t> : public void_caster<std::nullptr_t> { };
+
+template <> class type_caster<bool> {
+public:
+ bool load(handle src, bool convert) {
+ if (!src) return false;
+ else if (src.ptr() == Py_True) { value = true; return true; }
+ else if (src.ptr() == Py_False) { value = false; return true; }
+ else if (convert || !strcmp("numpy.bool_", Py_TYPE(src.ptr())->tp_name)) {
+ // (allow non-implicit conversion for numpy booleans)
+
+ Py_ssize_t res = -1;
+ if (src.is_none()) {
+ res = 0; // None is implicitly converted to False
+ }
+ #if defined(PYPY_VERSION)
+ // On PyPy, check that "__bool__" (or "__nonzero__" on Python 2.7) attr exists
+ else if (hasattr(src, PYBIND11_BOOL_ATTR)) {
+ res = PyObject_IsTrue(src.ptr());
+ }
+ #else
+ // Alternate approach for CPython: this does the same as the above, but optimized
+ // using the CPython API so as to avoid an unneeded attribute lookup.
+ else if (auto tp_as_number = src.ptr()->ob_type->tp_as_number) {
+ if (PYBIND11_NB_BOOL(tp_as_number)) {
+ res = (*PYBIND11_NB_BOOL(tp_as_number))(src.ptr());
+ }
+ }
+ #endif
+ if (res == 0 || res == 1) {
+ value = (bool) res;
+ return true;
+ } else {
+ PyErr_Clear();
+ }
+ }
+ return false;
+ }
+ static handle cast(bool src, return_value_policy /* policy */, handle /* parent */) {
+ return handle(src ? Py_True : Py_False).inc_ref();
+ }
+ PYBIND11_TYPE_CASTER(bool, _("bool"));
+};
+
+// Helper class for UTF-{8,16,32} C++ stl strings:
+template <typename StringType, bool IsView = false> struct string_caster {
+ using CharT = typename StringType::value_type;
+
+ // Simplify life by being able to assume standard char sizes (the standard only guarantees
+ // minimums, but Python requires exact sizes)
+ static_assert(!std::is_same<CharT, char>::value || sizeof(CharT) == 1, "Unsupported char size != 1");
+#if defined(PYBIND11_HAS_U8STRING)
+ static_assert(!std::is_same<CharT, char8_t>::value || sizeof(CharT) == 1, "Unsupported char8_t size != 1");
+#endif
+ static_assert(!std::is_same<CharT, char16_t>::value || sizeof(CharT) == 2, "Unsupported char16_t size != 2");
+ static_assert(!std::is_same<CharT, char32_t>::value || sizeof(CharT) == 4, "Unsupported char32_t size != 4");
+ // wchar_t can be either 16 bits (Windows) or 32 (everywhere else)
+ static_assert(!std::is_same<CharT, wchar_t>::value || sizeof(CharT) == 2 || sizeof(CharT) == 4,
+ "Unsupported wchar_t size != 2/4");
+ static constexpr size_t UTF_N = 8 * sizeof(CharT);
+
+ bool load(handle src, bool) {
+#if PY_MAJOR_VERSION < 3
+ object temp;
+#endif
+ handle load_src = src;
+ if (!src) {
+ return false;
+ } else if (!PyUnicode_Check(load_src.ptr())) {
+#if PY_MAJOR_VERSION >= 3
+ return load_bytes(load_src);
+#else
+ if (std::is_same<CharT, char>::value) {
+ return load_bytes(load_src);
+ }
+
+ // The below is a guaranteed failure in Python 3 when PyUnicode_Check returns false
+ if (!PYBIND11_BYTES_CHECK(load_src.ptr()))
+ return false;
+
+ temp = reinterpret_steal<object>(PyUnicode_FromObject(load_src.ptr()));
+ if (!temp) { PyErr_Clear(); return false; }
+ load_src = temp;
+#endif
+ }
+
+ auto utfNbytes = reinterpret_steal<object>(PyUnicode_AsEncodedString(
+ load_src.ptr(), UTF_N == 8 ? "utf-8" : UTF_N == 16 ? "utf-16" : "utf-32", nullptr));
+ if (!utfNbytes) { PyErr_Clear(); return false; }
+
+ const auto *buffer = reinterpret_cast<const CharT *>(PYBIND11_BYTES_AS_STRING(utfNbytes.ptr()));
+ size_t length = (size_t) PYBIND11_BYTES_SIZE(utfNbytes.ptr()) / sizeof(CharT);
+ if (UTF_N > 8) { buffer++; length--; } // Skip BOM for UTF-16/32
+ value = StringType(buffer, length);
+
+ // If we're loading a string_view we need to keep the encoded Python object alive:
+ if (IsView)
+ loader_life_support::add_patient(utfNbytes);
+
+ return true;
+ }
+
+ static handle cast(const StringType &src, return_value_policy /* policy */, handle /* parent */) {
+ const char *buffer = reinterpret_cast<const char *>(src.data());
+ auto nbytes = ssize_t(src.size() * sizeof(CharT));
+ handle s = decode_utfN(buffer, nbytes);
+ if (!s) throw error_already_set();
+ return s;
+ }
+
+ PYBIND11_TYPE_CASTER(StringType, _(PYBIND11_STRING_NAME));
+
+private:
+ static handle decode_utfN(const char *buffer, ssize_t nbytes) {
+#if !defined(PYPY_VERSION)
+ return
+ UTF_N == 8 ? PyUnicode_DecodeUTF8(buffer, nbytes, nullptr) :
+ UTF_N == 16 ? PyUnicode_DecodeUTF16(buffer, nbytes, nullptr, nullptr) :
+ PyUnicode_DecodeUTF32(buffer, nbytes, nullptr, nullptr);
+#else
+ // PyPy segfaults on PyUnicode_DecodeUTF16 (and possibly on PyUnicode_DecodeUTF32 as well),
+ // so bypass the whole thing by just passing the encoding as a string value, which works properly:
+ return PyUnicode_Decode(buffer, nbytes, UTF_N == 8 ? "utf-8" : UTF_N == 16 ? "utf-16" : "utf-32", nullptr);
+#endif
+ }
+
+ // When loading into a std::string or char*, accept a bytes object as-is (i.e.
+ // without any encoding/decoding attempt). For other C++ char sizes this is a no-op:
+ // the regular load() path, which supports loading a unicode from a str, is used instead.
+ template <typename C = CharT>
+ bool load_bytes(enable_if_t<std::is_same<C, char>::value, handle> src) {
+ if (PYBIND11_BYTES_CHECK(src.ptr())) {
+ // We were passed a Python 3 raw bytes; accept it into a std::string or char*
+ // without any encoding attempt.
+ const char *bytes = PYBIND11_BYTES_AS_STRING(src.ptr());
+ if (bytes) {
+ value = StringType(bytes, (size_t) PYBIND11_BYTES_SIZE(src.ptr()));
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ template <typename C = CharT>
+ bool load_bytes(enable_if_t<!std::is_same<C, char>::value, handle>) { return false; }
+};
+
+template <typename CharT, class Traits, class Allocator>
+struct type_caster<std::basic_string<CharT, Traits, Allocator>, enable_if_t<is_std_char_type<CharT>::value>>
+ : string_caster<std::basic_string<CharT, Traits, Allocator>> {};
+
+#ifdef PYBIND11_HAS_STRING_VIEW
+template <typename CharT, class Traits>
+struct type_caster<std::basic_string_view<CharT, Traits>, enable_if_t<is_std_char_type<CharT>::value>>
+ : string_caster<std::basic_string_view<CharT, Traits>, true> {};
+#endif
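+
+// Sketch of the resulting behaviour at the binding level (hypothetical function):
+//
+//     m.def("utf8_len", [](const std::string &s) { return s.size(); });
+//     // Python: utf8_len("héllo") == 6   (str is encoded to UTF-8 by the caster)
+//     //         utf8_len(b"abc")  == 3   (bytes are accepted as-is, no decoding)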
+
+// Type caster for C-style strings. We basically use a std::string type caster, but also add the
+// ability to use None as a nullptr char* (which the string caster doesn't allow).
+template <typename CharT> struct type_caster<CharT, enable_if_t<is_std_char_type<CharT>::value>> {
+ using StringType = std::basic_string<CharT>;
+ using StringCaster = type_caster<StringType>;
+ StringCaster str_caster;
+ bool none = false;
+ CharT one_char = 0;
+public:
+ bool load(handle src, bool convert) {
+ if (!src) return false;
+ if (src.is_none()) {
+ // Defer accepting None to other overloads (if we aren't in convert mode):
+ if (!convert) return false;
+ none = true;
+ return true;
+ }
+ return str_caster.load(src, convert);
+ }
+
+ static handle cast(const CharT *src, return_value_policy policy, handle parent) {
+ if (src == nullptr) return pybind11::none().inc_ref();
+ return StringCaster::cast(StringType(src), policy, parent);
+ }
+
+ static handle cast(CharT src, return_value_policy policy, handle parent) {
+ if (std::is_same<char, CharT>::value) {
+ handle s = PyUnicode_DecodeLatin1((const char *) &src, 1, nullptr);
+ if (!s) throw error_already_set();
+ return s;
+ }
+ return StringCaster::cast(StringType(1, src), policy, parent);
+ }
+
+ operator CharT*() { return none ? nullptr : const_cast<CharT *>(static_cast<StringType &>(str_caster).c_str()); }
+ operator CharT&() {
+ if (none)
+ throw value_error("Cannot convert None to a character");
+
+ auto &value = static_cast<StringType &>(str_caster);
+ size_t str_len = value.size();
+ if (str_len == 0)
+ throw value_error("Cannot convert empty string to a character");
+
+ // If we're in UTF-8 mode, we have two possible failures: one for a unicode character that
+ // is too high, and one for multiple unicode characters (caught later), so we need to figure
+ // out how long the first encoded character is in bytes to distinguish between these two
+ // errors. We also want to allow unicode characters U+0080 through U+00FF, as those
+ // can fit into a single char value.
+ if (StringCaster::UTF_N == 8 && str_len > 1 && str_len <= 4) {
+ auto v0 = static_cast<unsigned char>(value[0]);
+ size_t char0_bytes = !(v0 & 0x80) ? 1 : // low bits only: 0-127
+ (v0 & 0xE0) == 0xC0 ? 2 : // 0b110xxxxx - start of 2-byte sequence
+ (v0 & 0xF0) == 0xE0 ? 3 : // 0b1110xxxx - start of 3-byte sequence
+ 4; // 0b11110xxx - start of 4-byte sequence
+
+ if (char0_bytes == str_len) {
+ // If we have a 128-255 value, we can decode it into a single char:
+ if (char0_bytes == 2 && (v0 & 0xFC) == 0xC0) { // 0b110000xx 0b10xxxxxx
+ one_char = static_cast<CharT>(((v0 & 3) << 6) + (static_cast<unsigned char>(value[1]) & 0x3F));
+ return one_char;
+ }
+ // Otherwise we have a single character, but it's > U+00FF
+ throw value_error("Character code point not in range(0x100)");
+ }
+ }
+
+ // UTF-16 is much easier: we can only have a surrogate pair for values above U+FFFF, thus a
+ // surrogate pair with total length 2 instantly indicates a range error (but not a "your
+ // string was too long" error).
+ else if (StringCaster::UTF_N == 16 && str_len == 2) {
+ one_char = static_cast<CharT>(value[0]);
+ if (one_char >= 0xD800 && one_char < 0xE000)
+ throw value_error("Character code point not in range(0x10000)");
+ }
+
+ if (str_len != 1)
+ throw value_error("Expected a character, but multi-character string found");
+
+ one_char = value[0];
+ return one_char;
+ }
+
+ static constexpr auto name = _(PYBIND11_STRING_NAME);
+ template <typename _T> using cast_op_type = pybind11::detail::cast_op_type<_T>;
+};
+
+// Base implementation for std::tuple and std::pair
+template <template<typename...> class Tuple, typename... Ts> class tuple_caster {
+ using type = Tuple<Ts...>;
+ static constexpr auto size = sizeof...(Ts);
+ using indices = make_index_sequence<size>;
+public:
+
+ bool load(handle src, bool convert) {
+ if (!isinstance<sequence>(src))
+ return false;
+ const auto seq = reinterpret_borrow<sequence>(src);
+ if (seq.size() != size)
+ return false;
+ return load_impl(seq, convert, indices{});
+ }
+
+ template <typename T>
+ static handle cast(T &&src, return_value_policy policy, handle parent) {
+ return cast_impl(std::forward<T>(src), policy, parent, indices{});
+ }
+
+ // copied from the PYBIND11_TYPE_CASTER macro
+ template <typename T>
+ static handle cast(T *src, return_value_policy policy, handle parent) {
+ if (!src) return none().release();
+ if (policy == return_value_policy::take_ownership) {
+ auto h = cast(std::move(*src), policy, parent); delete src; return h;
+ } else {
+ return cast(*src, policy, parent);
+ }
+ }
+
+ static constexpr auto name = _("Tuple[") + concat(make_caster<Ts>::name...) + _("]");
+
+ template <typename T> using cast_op_type = type;
+
+ operator type() & { return implicit_cast(indices{}); }
+ operator type() && { return std::move(*this).implicit_cast(indices{}); }
+
+protected:
+ template <size_t... Is>
+ type implicit_cast(index_sequence<Is...>) & { return type(cast_op<Ts>(std::get<Is>(subcasters))...); }
+ template <size_t... Is>
+ type implicit_cast(index_sequence<Is...>) && { return type(cast_op<Ts>(std::move(std::get<Is>(subcasters)))...); }
+
+ static constexpr bool load_impl(const sequence &, bool, index_sequence<>) { return true; }
+
+ template <size_t... Is>
+ bool load_impl(const sequence &seq, bool convert, index_sequence<Is...>) {
+#ifdef __cpp_fold_expressions
+ if ((... || !std::get<Is>(subcasters).load(seq[Is], convert)))
+ return false;
+#else
+ for (bool r : {std::get<Is>(subcasters).load(seq[Is], convert)...})
+ if (!r)
+ return false;
+#endif
+ return true;
+ }
+
+ /* Implementation: Convert a C++ tuple into a Python tuple */
+ template <typename T, size_t... Is>
+ static handle cast_impl(T &&src, return_value_policy policy, handle parent, index_sequence<Is...>) {
+ std::array<object, size> entries{{
+ reinterpret_steal<object>(make_caster<Ts>::cast(std::get<Is>(std::forward<T>(src)), policy, parent))...
+ }};
+ for (const auto &entry: entries)
+ if (!entry)
+ return handle();
+ tuple result(size);
+ int counter = 0;
+ for (auto & entry: entries)
+ PyTuple_SET_ITEM(result.ptr(), counter++, entry.release().ptr());
+ return result.release();
+ }
+
+ Tuple<make_caster<Ts>...> subcasters;
+};
+
+template <typename T1, typename T2> class type_caster<std::pair<T1, T2>>
+ : public tuple_caster<std::pair, T1, T2> {};
+
+template <typename... Ts> class type_caster<std::tuple<Ts...>>
+ : public tuple_caster<std::tuple, Ts...> {};
+
+/// Helper class which abstracts away certain actions. Users can provide specializations for
+/// custom holders, but it's only necessary if the type has a non-standard interface.
+template <typename T>
+struct holder_helper {
+ static auto get(const T &p) -> decltype(p.get()) { return p.get(); }
+};
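+
+// For example, a hypothetical holder whose raw pointer is exposed through a member other
+// than get() could be adapted like this (inside namespace pybind11::detail):
+//
+//     template <typename T>
+//     struct holder_helper<MyIntrusivePtr<T>> {               // MyIntrusivePtr is hypothetical
+//         static T *get(const MyIntrusivePtr<T> &p) { return p.raw(); }
+//     };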
+
+/// Type caster for holder types like std::shared_ptr, etc.
+template <typename type, typename holder_type>
+struct copyable_holder_caster : public type_caster_base<type> {
+public:
+ using base = type_caster_base<type>;
+ static_assert(std::is_base_of<base, type_caster<type>>::value,
+ "Holder classes are only supported for custom types");
+ using base::base;
+ using base::cast;
+ using base::typeinfo;
+ using base::value;
+
+ bool load(handle src, bool convert) {
+ return base::template load_impl<copyable_holder_caster<type, holder_type>>(src, convert);
+ }
+
+ explicit operator type*() { return this->value; }
+ // static_cast works around compiler error with MSVC 17 and CUDA 10.2
+ // see issue #2180
+ explicit operator type&() { return *(static_cast<type *>(this->value)); }
+ explicit operator holder_type*() { return std::addressof(holder); }
+ explicit operator holder_type&() { return holder; }
+
+ static handle cast(const holder_type &src, return_value_policy, handle) {
+ const auto *ptr = holder_helper<holder_type>::get(src);
+ return type_caster_base<type>::cast_holder(ptr, &src);
+ }
+
+protected:
+ friend class type_caster_generic;
+ void check_holder_compat() {
+ if (typeinfo->default_holder)
+ throw cast_error("Unable to load a custom holder type from a default-holder instance");
+ }
+
+ bool load_value(value_and_holder &&v_h) {
+ if (v_h.holder_constructed()) {
+ value = v_h.value_ptr();
+ holder = v_h.template holder<holder_type>();
+ return true;
+ } else {
+ throw cast_error("Unable to cast from non-held to held instance (T& to Holder<T>) "
+#if defined(NDEBUG)
+ "(compile in debug mode for type information)");
+#else
+ "of type '" + type_id<holder_type>() + "'");
+#endif
+ }
+ }
+
+ template <typename T = holder_type, detail::enable_if_t<!std::is_constructible<T, const T &, type*>::value, int> = 0>
+ bool try_implicit_casts(handle, bool) { return false; }
+
+ template <typename T = holder_type, detail::enable_if_t<std::is_constructible<T, const T &, type*>::value, int> = 0>
+ bool try_implicit_casts(handle src, bool convert) {
+ for (auto &cast : typeinfo->implicit_casts) {
+ copyable_holder_caster sub_caster(*cast.first);
+ if (sub_caster.load(src, convert)) {
+ value = cast.second(sub_caster.value);
+ holder = holder_type(sub_caster.holder, (type *) value);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static bool try_direct_conversions(handle) { return false; }
+
+
+ holder_type holder;
+};
+
+/// Specialize for the common std::shared_ptr, so users don't need to declare it with PYBIND11_DECLARE_HOLDER_TYPE.
+template <typename T>
+class type_caster<std::shared_ptr<T>> : public copyable_holder_caster<T, std::shared_ptr<T>> { };
+
+template <typename type, typename holder_type>
+struct move_only_holder_caster {
+ static_assert(std::is_base_of<type_caster_base<type>, type_caster<type>>::value,
+ "Holder classes are only supported for custom types");
+
+ static handle cast(holder_type &&src, return_value_policy, handle) {
+ auto *ptr = holder_helper<holder_type>::get(src);
+ return type_caster_base<type>::cast_holder(ptr, std::addressof(src));
+ }
+ static constexpr auto name = type_caster_base<type>::name;
+};
+
+template <typename type, typename deleter>
+class type_caster<std::unique_ptr<type, deleter>>
+ : public move_only_holder_caster<type, std::unique_ptr<type, deleter>> { };
+
+template <typename type, typename holder_type>
+using type_caster_holder = conditional_t<is_copy_constructible<holder_type>::value,
+ copyable_holder_caster<type, holder_type>,
+ move_only_holder_caster<type, holder_type>>;
+
+template <typename T, bool Value = false> struct always_construct_holder { static constexpr bool value = Value; };
+
+/// Create a specialization for custom holder types (silently ignores std::shared_ptr)
+#define PYBIND11_DECLARE_HOLDER_TYPE(type, holder_type, ...) \
+ namespace pybind11 { namespace detail { \
+ template <typename type> \
+ struct always_construct_holder<holder_type> : always_construct_holder<void, ##__VA_ARGS__> { }; \
+ template <typename type> \
+ class type_caster<holder_type, enable_if_t<!is_shared_ptr<holder_type>::value>> \
+ : public type_caster_holder<type, holder_type> { }; \
+ }}
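+
+// Typical usage, as a sketch (SmartPtr is a hypothetical user-defined holder; the optional
+// trailing argument marks holders that must always be constructed around new instances):
+//
+//     PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>);
+//     PYBIND11_DECLARE_HOLDER_TYPE(T, SmartPtr<T>, true);  // always-construct variant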
+
+// PYBIND11_DECLARE_HOLDER_TYPE holder types:
+template <typename base, typename holder> struct is_holder_type :
+ std::is_base_of<detail::type_caster_holder<base, holder>, detail::type_caster<holder>> {};
+// Specialization for always-supported unique_ptr holders:
+template <typename base, typename deleter> struct is_holder_type<base, std::unique_ptr<base, deleter>> :
+ std::true_type {};
+
+template <typename T> struct handle_type_name { static constexpr auto name = _<T>(); };
+template <> struct handle_type_name<bytes> { static constexpr auto name = _(PYBIND11_BYTES_NAME); };
+template <> struct handle_type_name<int_> { static constexpr auto name = _("int"); };
+template <> struct handle_type_name<iterable> { static constexpr auto name = _("Iterable"); };
+template <> struct handle_type_name<iterator> { static constexpr auto name = _("Iterator"); };
+template <> struct handle_type_name<none> { static constexpr auto name = _("None"); };
+template <> struct handle_type_name<args> { static constexpr auto name = _("*args"); };
+template <> struct handle_type_name<kwargs> { static constexpr auto name = _("**kwargs"); };
+
+template <typename type>
+struct pyobject_caster {
+ template <typename T = type, enable_if_t<std::is_same<T, handle>::value, int> = 0>
+ bool load(handle src, bool /* convert */) { value = src; return static_cast<bool>(value); }
+
+ template <typename T = type, enable_if_t<std::is_base_of<object, T>::value, int> = 0>
+ bool load(handle src, bool /* convert */) {
+ if (!isinstance<type>(src))
+ return false;
+ value = reinterpret_borrow<type>(src);
+ return true;
+ }
+
+ static handle cast(const handle &src, return_value_policy /* policy */, handle /* parent */) {
+ return src.inc_ref();
+ }
+ PYBIND11_TYPE_CASTER(type, handle_type_name<type>::name);
+};
+
+template <typename T>
+class type_caster<T, enable_if_t<is_pyobject<T>::value>> : public pyobject_caster<T> { };
+
+// Our conditions for enabling moving are quite restrictive:
+// At compile time:
+// - T needs to be a non-const, non-pointer, non-reference type
+// - type_caster<T>::operator T&() must exist
+// - the type must be move constructible (obviously)
+// At run-time:
+// - if the type is non-copy-constructible, the object must be the sole owner of the type (i.e. it
+// must have ref_count() == 1)
+// If any of the above are not satisfied, we fall back to copying.
+template <typename T> using move_is_plain_type = satisfies_none_of<T,
+ std::is_void, std::is_pointer, std::is_reference, std::is_const
+>;
+template <typename T, typename SFINAE = void> struct move_always : std::false_type {};
+template <typename T> struct move_always<T, enable_if_t<all_of<
+ move_is_plain_type<T>,
+ negation<is_copy_constructible<T>>,
+ std::is_move_constructible<T>,
+ std::is_same<decltype(std::declval<make_caster<T>>().operator T&()), T&>
+>::value>> : std::true_type {};
+template <typename T, typename SFINAE = void> struct move_if_unreferenced : std::false_type {};
+template <typename T> struct move_if_unreferenced<T, enable_if_t<all_of<
+ move_is_plain_type<T>,
+ negation<move_always<T>>,
+ std::is_move_constructible<T>,
+ std::is_same<decltype(std::declval<make_caster<T>>().operator T&()), T&>
+>::value>> : std::true_type {};
+template <typename T> using move_never = none_of<move_always<T>, move_if_unreferenced<T>>;
+
+// Detect whether returning a `type` from a cast on type's type_caster is going to result in a
+// reference or pointer to a local variable of the type_caster. Basically, only
+// non-reference/pointer `type`s and reference/pointers from a type_caster_generic are safe;
+// everything else returns a reference/pointer to a local variable.
+template <typename type> using cast_is_temporary_value_reference = bool_constant<
+ (std::is_reference<type>::value || std::is_pointer<type>::value) &&
+ !std::is_base_of<type_caster_generic, make_caster<type>>::value &&
+ !std::is_same<intrinsic_t<type>, void>::value
+>;
+
+// When a value returned from a C++ function is being cast back to Python, we almost always want to
+// force `policy = move`, regardless of the return value policy the function/method was declared
+// with.
+template <typename Return, typename SFINAE = void> struct return_value_policy_override {
+ static return_value_policy policy(return_value_policy p) { return p; }
+};
+
+template <typename Return> struct return_value_policy_override<Return,
+ detail::enable_if_t<std::is_base_of<type_caster_generic, make_caster<Return>>::value, void>> {
+ static return_value_policy policy(return_value_policy p) {
+ return !std::is_lvalue_reference<Return>::value &&
+ !std::is_pointer<Return>::value
+ ? return_value_policy::move : p;
+ }
+};
+
+// Basic python -> C++ casting; throws if casting fails
+template <typename T, typename SFINAE> type_caster<T, SFINAE> &load_type(type_caster<T, SFINAE> &conv, const handle &handle) {
+ if (!conv.load(handle, true)) {
+#if defined(NDEBUG)
+ throw cast_error("Unable to cast Python instance to C++ type (compile in debug mode for details)");
+#else
+ throw cast_error("Unable to cast Python instance of type " +
+ (std::string) str(type::handle_of(handle)) + " to C++ type '" + type_id<T>() + "'");
+#endif
+ }
+ return conv;
+}
+// Wrapper around the above that also constructs and returns a type_caster
+template <typename T> make_caster<T> load_type(const handle &handle) {
+ make_caster<T> conv;
+ load_type(conv, handle);
+ return conv;
+}
+
+PYBIND11_NAMESPACE_END(detail)
+
+// pytype -> C++ type
+template <typename T, detail::enable_if_t<!detail::is_pyobject<T>::value, int> = 0>
+T cast(const handle &handle) {
+ using namespace detail;
+ static_assert(!cast_is_temporary_value_reference<T>::value,
+ "Unable to cast type to reference: value is local to type caster");
+ return cast_op<T>(load_type<T>(handle));
+}
+
+// pytype -> pytype (calls converting constructor)
+template <typename T, detail::enable_if_t<detail::is_pyobject<T>::value, int> = 0>
+T cast(const handle &handle) { return T(reinterpret_borrow<object>(handle)); }
+
+// C++ type -> py::object
+template <typename T, detail::enable_if_t<!detail::is_pyobject<T>::value, int> = 0>
+object cast(T &&value, return_value_policy policy = return_value_policy::automatic_reference,
+ handle parent = handle()) {
+ using no_ref_T = typename std::remove_reference<T>::type;
+ if (policy == return_value_policy::automatic)
+ policy = std::is_pointer<no_ref_T>::value ? return_value_policy::take_ownership :
+ std::is_lvalue_reference<T>::value ? return_value_policy::copy : return_value_policy::move;
+ else if (policy == return_value_policy::automatic_reference)
+ policy = std::is_pointer<no_ref_T>::value ? return_value_policy::reference :
+ std::is_lvalue_reference<T>::value ? return_value_policy::copy : return_value_policy::move;
+ return reinterpret_steal<object>(detail::make_caster<T>::cast(std::forward<T>(value), policy, parent));
+}
+
+template <typename T> T handle::cast() const { return pybind11::cast<T>(*this); }
+template <> inline void handle::cast() const { return; }
+
+template <typename T>
+detail::enable_if_t<!detail::move_never<T>::value, T> move(object &&obj) {
+ if (obj.ref_count() > 1)
+#if defined(NDEBUG)
+ throw cast_error("Unable to cast Python instance to C++ rvalue: instance has multiple references"
+ " (compile in debug mode for details)");
+#else
+ throw cast_error("Unable to move from Python " + (std::string) str(type::handle_of(obj)) +
+ " instance to C++ " + type_id<T>() + " instance: instance has multiple references");
+#endif
+
+ // Move into a temporary and return that, because the reference may be a local value of `conv`
+ T ret = std::move(detail::load_type<T>(obj).operator T&());
+ return ret;
+}
+
+// Calling cast() on an rvalue calls pybind11::cast with the object rvalue, which does:
+// - If we have to move (because T has no copy constructor), do it. This will fail if the moved
+// object has multiple references, but trying to copy will fail to compile.
+// - If both movable and copyable, check ref count: if 1, move; otherwise copy
+// - Otherwise (not movable), copy.
+template <typename T> detail::enable_if_t<detail::move_always<T>::value, T> cast(object &&object) {
+ return move<T>(std::move(object));
+}
+template <typename T> detail::enable_if_t<detail::move_if_unreferenced<T>::value, T> cast(object &&object) {
+ if (object.ref_count() > 1)
+ return cast<T>(object);
+ else
+ return move<T>(std::move(object));
+}
+template <typename T> detail::enable_if_t<detail::move_never<T>::value, T> cast(object &&object) {
+ return cast<T>(object);
+}
+
+template <typename T> T object::cast() const & { return pybind11::cast<T>(*this); }
+template <typename T> T object::cast() && { return pybind11::cast<T>(std::move(*this)); }
+template <> inline void object::cast() const & { return; }
+template <> inline void object::cast() && { return; }
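+
+// Illustrative usage sketch (not part of the original header): the casting
+// entry points defined above can be exercised roughly like this, assuming
+// `obj` is a py::object holding a Python integer:
+//
+//     int i = obj.cast<int>();           // Python -> C++
+//     py::object o = py::cast(i + 1);    // C++ -> Python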
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+// Declared in pytypes.h:
+template <typename T, enable_if_t<!is_pyobject<T>::value, int>>
+object object_or_cast(T &&o) { return pybind11::cast(std::forward<T>(o)); }
+
+struct override_unused {}; // Placeholder type for the unneeded (and dead code) static variable in the PYBIND11_OVERRIDE_PURE macro
+template <typename ret_type> using override_caster_t = conditional_t<
+ cast_is_temporary_value_reference<ret_type>::value, make_caster<ret_type>, override_unused>;
+
+// Trampoline use: for reference/pointer types to value-converted values, we do a value cast, then
+// store the result in the given variable. For other types, this is a no-op.
+template <typename T> enable_if_t<cast_is_temporary_value_reference<T>::value, T> cast_ref(object &&o, make_caster<T> &caster) {
+ return cast_op<T>(load_type(caster, o));
+}
+template <typename T> enable_if_t<!cast_is_temporary_value_reference<T>::value, T> cast_ref(object &&, override_unused &) {
+ pybind11_fail("Internal error: cast_ref fallback invoked"); }
+
+// Trampoline use: Having a pybind11::cast with an invalid reference type is going to static_assert, even
+// when it's in dead code, so we provide a "trampoline" to pybind11::cast that only does anything in
+// cases where pybind11::cast is valid.
+template <typename T> enable_if_t<!cast_is_temporary_value_reference<T>::value, T> cast_safe(object &&o) {
+ return pybind11::cast<T>(std::move(o)); }
+template <typename T> enable_if_t<cast_is_temporary_value_reference<T>::value, T> cast_safe(object &&) {
+ pybind11_fail("Internal error: cast_safe fallback invoked"); }
+template <> inline void cast_safe<void>(object &&) {}
+
+PYBIND11_NAMESPACE_END(detail)
+
+template <return_value_policy policy = return_value_policy::automatic_reference>
+tuple make_tuple() { return tuple(0); }
+
+template <return_value_policy policy = return_value_policy::automatic_reference,
+ typename... Args> tuple make_tuple(Args&&... args_) {
+ constexpr size_t size = sizeof...(Args);
+ std::array<object, size> args {
+ { reinterpret_steal<object>(detail::make_caster<Args>::cast(
+ std::forward<Args>(args_), policy, nullptr))... }
+ };
+ for (size_t i = 0; i < args.size(); i++) {
+ if (!args[i]) {
+#if defined(NDEBUG)
+ throw cast_error("make_tuple(): unable to convert arguments to Python object (compile in debug mode for details)");
+#else
+ std::array<std::string, size> argtypes { {type_id<Args>()...} };
+ throw cast_error("make_tuple(): unable to convert argument of type '" +
+ argtypes[i] + "' to Python object");
+#endif
+ }
+ }
+ tuple result(size);
+ int counter = 0;
+ for (auto &arg_value : args)
+ PyTuple_SET_ITEM(result.ptr(), counter++, arg_value.release().ptr());
+ return result;
+}
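+
+// Illustrative usage sketch (not part of the original header): make_tuple
+// converts each C++ argument to a Python object and packs the results, e.g.
+//
+//     py::tuple t = py::make_tuple(1, "two", 3.0);   // -> (1, 'two', 3.0)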
+
+/// \ingroup annotations
+/// Annotation for arguments
+struct arg {
+ /// Constructs an argument with the name of the argument; if null or omitted, this is a positional argument.
+ constexpr explicit arg(const char *name = nullptr) : name(name), flag_noconvert(false), flag_none(true) { }
+ /// Assign a value to this argument
+ template <typename T> arg_v operator=(T &&value) const;
+ /// Indicate that the type should not be converted in the type caster
+ arg &noconvert(bool flag = true) { flag_noconvert = flag; return *this; }
+ /// Indicates that the argument should/shouldn't allow None (e.g. for nullable pointer args)
+ arg &none(bool flag = true) { flag_none = flag; return *this; }
+
+ const char *name; ///< If non-null, this is a named kwargs argument
+ bool flag_noconvert : 1; ///< If set, do not allow conversion (requires a supporting type caster!)
+ bool flag_none : 1; ///< If set (the default), allow None to be passed to this argument
+};
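+
+// Illustrative usage sketch (assumption: a hypothetical free function
+// `f(int x, Widget *w)` is being bound): argument annotations are attached
+// when defining the binding,
+//
+//     m.def("f", &f, py::arg("x").noconvert(), py::arg("w").none(false));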
+
+/// \ingroup annotations
+/// Annotation for arguments with values
+struct arg_v : arg {
+private:
+ template <typename T>
+ arg_v(arg &&base, T &&x, const char *descr = nullptr)
+ : arg(base),
+ value(reinterpret_steal<object>(
+ detail::make_caster<T>::cast(x, return_value_policy::automatic, {})
+ )),
+ descr(descr)
+#if !defined(NDEBUG)
+ , type(type_id<T>())
+#endif
+ { }
+
+public:
+ /// Direct construction with name, default, and description
+ template <typename T>
+ arg_v(const char *name, T &&x, const char *descr = nullptr)
+ : arg_v(arg(name), std::forward<T>(x), descr) { }
+
+ /// Called internally when invoking `py::arg("a") = value`
+ template <typename T>
+ arg_v(const arg &base, T &&x, const char *descr = nullptr)
+ : arg_v(arg(base), std::forward<T>(x), descr) { }
+
+ /// Same as `arg::noconvert()`, but returns *this as arg_v&, not arg&
+ arg_v &noconvert(bool flag = true) { arg::noconvert(flag); return *this; }
+
+ /// Same as `arg::none()`, but returns *this as arg_v&, not arg&
+ arg_v &none(bool flag = true) { arg::none(flag); return *this; }
+
+ /// The default value
+ object value;
+ /// The (optional) description of the default value
+ const char *descr;
+#if !defined(NDEBUG)
+ /// The C++ type name of the default value (only available when compiled in debug mode)
+ std::string type;
+#endif
+};
+
+/// \ingroup annotations
+/// Annotation indicating that all following arguments are keyword-only; this is the equivalent of an
+/// unnamed '*' argument (in Python 3)
+struct kw_only {};
+
+/// \ingroup annotations
+/// Annotation indicating that all previous arguments are positional-only; this is the equivalent of an
+/// unnamed '/' argument (in Python 3.8)
+struct pos_only {};
+
+template <typename T>
+arg_v arg::operator=(T &&value) const { return {std::move(*this), std::forward<T>(value)}; }
+
+/// Alias for backward compatibility -- to be removed in version 2.0
+template <typename /*unused*/> using arg_t = arg_v;
+
+inline namespace literals {
+/** \rst
+ String literal version of `arg`
+ \endrst */
+constexpr arg operator"" _a(const char *name, size_t) { return arg(name); }
+} // namespace literals
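+
+// Illustrative usage sketch (not part of the original header): with
+// `using namespace pybind11::literals;` in scope, `"x"_a = 1` is shorthand
+// for `py::arg("x") = 1` when declaring default arguments:
+//
+//     m.def("f", &f, "x"_a = 1);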
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+// forward declaration (definition in attr.h)
+struct function_record;
+
+/// Internal data associated with a single function call
+struct function_call {
+ function_call(const function_record &f, handle p); // Implementation in attr.h
+
+ /// The function data:
+ const function_record &func;
+
+ /// Arguments passed to the function:
+ std::vector<handle> args;
+
+ /// The `convert` value the arguments should be loaded with
+ std::vector<bool> args_convert;
+
+ /// Extra references for the optional `py::args` and/or `py::kwargs` arguments (which, if
+ /// present, are also in `args` but without a reference).
+ object args_ref, kwargs_ref;
+
+ /// The parent, if any
+ handle parent;
+
+ /// If this is a call to an initializer, this argument contains `self`
+ handle init_self;
+};
+
+
+/// Helper class which loads arguments for C++ functions called from Python
+template <typename... Args>
+class argument_loader {
+ using indices = make_index_sequence<sizeof...(Args)>;
+
+ template <typename Arg> using argument_is_args = std::is_same<intrinsic_t<Arg>, args>;
+ template <typename Arg> using argument_is_kwargs = std::is_same<intrinsic_t<Arg>, kwargs>;
+ // Get args/kwargs argument positions relative to the end of the argument list:
+ static constexpr auto args_pos = constexpr_first<argument_is_args, Args...>() - (int) sizeof...(Args),
+ kwargs_pos = constexpr_first<argument_is_kwargs, Args...>() - (int) sizeof...(Args);
+
+ static constexpr bool args_kwargs_are_last = kwargs_pos >= - 1 && args_pos >= kwargs_pos - 1;
+
+ static_assert(args_kwargs_are_last, "py::args/py::kwargs are only permitted as the last argument(s) of a function");
+
+public:
+ static constexpr bool has_kwargs = kwargs_pos < 0;
+ static constexpr bool has_args = args_pos < 0;
+
+ static constexpr auto arg_names = concat(type_descr(make_caster<Args>::name)...);
+
+ bool load_args(function_call &call) {
+ return load_impl_sequence(call, indices{});
+ }
+
+ template <typename Return, typename Guard, typename Func>
+ enable_if_t<!std::is_void<Return>::value, Return> call(Func &&f) && {
+ return std::move(*this).template call_impl<Return>(std::forward<Func>(f), indices{}, Guard{});
+ }
+
+ template <typename Return, typename Guard, typename Func>
+ enable_if_t<std::is_void<Return>::value, void_type> call(Func &&f) && {
+ std::move(*this).template call_impl<Return>(std::forward<Func>(f), indices{}, Guard{});
+ return void_type();
+ }
+
+private:
+
+ static bool load_impl_sequence(function_call &, index_sequence<>) { return true; }
+
+ template <size_t... Is>
+ bool load_impl_sequence(function_call &call, index_sequence<Is...>) {
+#ifdef __cpp_fold_expressions
+ if ((... || !std::get<Is>(argcasters).load(call.args[Is], call.args_convert[Is])))
+ return false;
+#else
+ for (bool r : {std::get<Is>(argcasters).load(call.args[Is], call.args_convert[Is])...})
+ if (!r)
+ return false;
+#endif
+ return true;
+ }
+
+ template <typename Return, typename Func, size_t... Is, typename Guard>
+ Return call_impl(Func &&f, index_sequence<Is...>, Guard &&) && {
+ return std::forward<Func>(f)(cast_op<Args>(std::move(std::get<Is>(argcasters)))...);
+ }
+
+ std::tuple<make_caster<Args>...> argcasters;
+};
+
+/// Helper class which collects only positional arguments for a Python function call.
+/// A fancier version below can collect any argument, but this one is optimal for simple calls.
+template <return_value_policy policy>
+class simple_collector {
+public:
+ template <typename... Ts>
+ explicit simple_collector(Ts &&...values)
+ : m_args(pybind11::make_tuple<policy>(std::forward<Ts>(values)...)) { }
+
+ const tuple &args() const & { return m_args; }
+ dict kwargs() const { return {}; }
+
+ tuple args() && { return std::move(m_args); }
+
+ /// Call a Python function and pass the collected arguments
+ object call(PyObject *ptr) const {
+ PyObject *result = PyObject_CallObject(ptr, m_args.ptr());
+ if (!result)
+ throw error_already_set();
+ return reinterpret_steal<object>(result);
+ }
+
+private:
+ tuple m_args;
+};
+
+/// Helper class which collects positional, keyword, * and ** arguments for a Python function call
+template <return_value_policy policy>
+class unpacking_collector {
+public:
+ template <typename... Ts>
+ explicit unpacking_collector(Ts &&...values) {
+ // Tuples aren't (easily) resizable so a list is needed for collection,
+ // but the actual function call strictly requires a tuple.
+ auto args_list = list();
+ int _[] = { 0, (process(args_list, std::forward<Ts>(values)), 0)... };
+ ignore_unused(_);
+
+ m_args = std::move(args_list);
+ }
+
+ const tuple &args() const & { return m_args; }
+ const dict &kwargs() const & { return m_kwargs; }
+
+ tuple args() && { return std::move(m_args); }
+ dict kwargs() && { return std::move(m_kwargs); }
+
+ /// Call a Python function and pass the collected arguments
+ object call(PyObject *ptr) const {
+ PyObject *result = PyObject_Call(ptr, m_args.ptr(), m_kwargs.ptr());
+ if (!result)
+ throw error_already_set();
+ return reinterpret_steal<object>(result);
+ }
+
+private:
+ template <typename T>
+ void process(list &args_list, T &&x) {
+ auto o = reinterpret_steal<object>(detail::make_caster<T>::cast(std::forward<T>(x), policy, {}));
+ if (!o) {
+#if defined(NDEBUG)
+ argument_cast_error();
+#else
+ argument_cast_error(std::to_string(args_list.size()), type_id<T>());
+#endif
+ }
+ args_list.append(o);
+ }
+
+ void process(list &args_list, detail::args_proxy ap) {
+ for (auto a : ap)
+ args_list.append(a);
+ }
+
+ void process(list &/*args_list*/, arg_v a) {
+ if (!a.name)
+#if defined(NDEBUG)
+ nameless_argument_error();
+#else
+ nameless_argument_error(a.type);
+#endif
+
+ if (m_kwargs.contains(a.name)) {
+#if defined(NDEBUG)
+ multiple_values_error();
+#else
+ multiple_values_error(a.name);
+#endif
+ }
+ if (!a.value) {
+#if defined(NDEBUG)
+ argument_cast_error();
+#else
+ argument_cast_error(a.name, a.type);
+#endif
+ }
+ m_kwargs[a.name] = a.value;
+ }
+
+ void process(list &/*args_list*/, detail::kwargs_proxy kp) {
+ if (!kp)
+ return;
+ for (auto k : reinterpret_borrow<dict>(kp)) {
+ if (m_kwargs.contains(k.first)) {
+#if defined(NDEBUG)
+ multiple_values_error();
+#else
+ multiple_values_error(str(k.first));
+#endif
+ }
+ m_kwargs[k.first] = k.second;
+ }
+ }
+
+ [[noreturn]] static void nameless_argument_error() {
+ throw type_error("Got kwargs without a name; only named arguments "
+ "may be passed via py::arg() to a python function call. "
+ "(compile in debug mode for details)");
+ }
+ [[noreturn]] static void nameless_argument_error(std::string type) {
+ throw type_error("Got kwargs without a name of type '" + type + "'; only named "
+ "arguments may be passed via py::arg() to a python function call. ");
+ }
+ [[noreturn]] static void multiple_values_error() {
+ throw type_error("Got multiple values for keyword argument "
+ "(compile in debug mode for details)");
+ }
+
+ [[noreturn]] static void multiple_values_error(std::string name) {
+ throw type_error("Got multiple values for keyword argument '" + name + "'");
+ }
+
+ [[noreturn]] static void argument_cast_error() {
+ throw cast_error("Unable to convert call argument to Python object "
+ "(compile in debug mode for details)");
+ }
+
+ [[noreturn]] static void argument_cast_error(std::string name, std::string type) {
+ throw cast_error("Unable to convert call argument '" + name
+ + "' of type '" + type + "' to Python object");
+ }
+
+private:
+ tuple m_args;
+ dict m_kwargs;
+};
+
+/// Collect only positional arguments for a Python function call
+template <return_value_policy policy, typename... Args,
+ typename = enable_if_t<all_of<is_positional<Args>...>::value>>
+simple_collector<policy> collect_arguments(Args &&...args) {
+ return simple_collector<policy>(std::forward<Args>(args)...);
+}
+
+/// Collect all arguments, including keywords and unpacking (only instantiated when needed)
+template <return_value_policy policy, typename... Args,
+ typename = enable_if_t<!all_of<is_positional<Args>...>::value>>
+unpacking_collector<policy> collect_arguments(Args &&...args) {
+ // Following argument order rules for generalized unpacking according to PEP 448
+ static_assert(
+ constexpr_last<is_positional, Args...>() < constexpr_first<is_keyword_or_ds, Args...>()
+ && constexpr_last<is_s_unpacking, Args...>() < constexpr_first<is_ds_unpacking, Args...>(),
+ "Invalid function call: positional args must precede keywords and ** unpacking; "
+ "* unpacking must precede ** unpacking"
+ );
+ return unpacking_collector<policy>(std::forward<Args>(args)...);
+}
+
+template <typename Derived>
+template <return_value_policy policy, typename... Args>
+object object_api<Derived>::operator()(Args &&...args) const {
+ return detail::collect_arguments<policy>(std::forward<Args>(args)...).call(derived().ptr());
+}
+
+template <typename Derived>
+template <return_value_policy policy, typename... Args>
+object object_api<Derived>::call(Args &&...args) const {
+ return operator()<policy>(std::forward<Args>(args)...);
+}
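+
+// Illustrative usage sketch (not part of the original header): the variadic
+// call operator defined above makes any py::object callable with mixed
+// positional and keyword arguments, e.g.
+//
+//     py::object pow = py::module_::import("math").attr("pow");
+//     double r = pow(2, 3).cast<double>();   // r == 8.0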
+
+PYBIND11_NAMESPACE_END(detail)
+
+
+template<typename T>
+handle type::handle_of() {
+ static_assert(
+ std::is_base_of<detail::type_caster_generic, detail::make_caster<T>>::value,
+ "py::type::of<T> only supports the case where T is a registered C++ type."
+ );
+
+ return detail::get_type_handle(typeid(T), true);
+}
+
+
+#define PYBIND11_MAKE_OPAQUE(...) \
+ namespace pybind11 { namespace detail { \
+ template<> class type_caster<__VA_ARGS__> : public type_caster_base<__VA_ARGS__> { }; \
+ }}
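+
+// Illustrative usage sketch (not part of the original header): marking a
+// container type opaque disables its default conversion so it can instead be
+// bound as a regular class (py::bind_vector requires pybind11/stl_bind.h):
+//
+//     PYBIND11_MAKE_OPAQUE(std::vector<int>);
+//     // ... later, inside PYBIND11_MODULE:
+//     // py::bind_vector<std::vector<int>>(m, "IntVector");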
+
+/// Lets you pass a type containing a `,` through a macro parameter without needing a separate
+/// typedef, e.g.: `PYBIND11_OVERRIDE(PYBIND11_TYPE(ReturnType<A, B>), PYBIND11_TYPE(Parent<C, D>), f, arg)`
+#define PYBIND11_TYPE(...) __VA_ARGS__
+
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+/*
+ pybind11/chrono.h: Transparent conversion between std::chrono and python's datetime
+
+ Copyright (c) 2016 Trent Houliston <trent@houliston.me> and
+ Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "pybind11.h"
+#include <cmath>
+#include <ctime>
+#include <chrono>
+#include <datetime.h>
+
+// Backport the PyDateTime_DELTA functions from Python 3.3 if required
+#ifndef PyDateTime_DELTA_GET_DAYS
+#define PyDateTime_DELTA_GET_DAYS(o) (((PyDateTime_Delta*)o)->days)
+#endif
+#ifndef PyDateTime_DELTA_GET_SECONDS
+#define PyDateTime_DELTA_GET_SECONDS(o) (((PyDateTime_Delta*)o)->seconds)
+#endif
+#ifndef PyDateTime_DELTA_GET_MICROSECONDS
+#define PyDateTime_DELTA_GET_MICROSECONDS(o) (((PyDateTime_Delta*)o)->microseconds)
+#endif
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+template <typename type> class duration_caster {
+public:
+ using rep = typename type::rep;
+ using period = typename type::period;
+
+ using days = std::chrono::duration<uint_fast32_t, std::ratio<86400>>;
+
+ bool load(handle src, bool) {
+ using namespace std::chrono;
+
+ // Lazy initialise the PyDateTime import
+ if (!PyDateTimeAPI) { PyDateTime_IMPORT; }
+
+ if (!src) return false;
+ // If invoked with a datetime.timedelta object
+ if (PyDelta_Check(src.ptr())) {
+ value = type(duration_cast<duration<rep, period>>(
+ days(PyDateTime_DELTA_GET_DAYS(src.ptr()))
+ + seconds(PyDateTime_DELTA_GET_SECONDS(src.ptr()))
+ + microseconds(PyDateTime_DELTA_GET_MICROSECONDS(src.ptr()))));
+ return true;
+ }
+ // If invoked with a float we assume it is seconds and convert
+ else if (PyFloat_Check(src.ptr())) {
+ value = type(duration_cast<duration<rep, period>>(duration<double>(PyFloat_AsDouble(src.ptr()))));
+ return true;
+ }
+ else return false;
+ }
+
+ // If this is a duration just return it back
+ static const std::chrono::duration<rep, period>& get_duration(const std::chrono::duration<rep, period> &src) {
+ return src;
+ }
+
+ // If this is a time_point get the time_since_epoch
+ template <typename Clock> static std::chrono::duration<rep, period> get_duration(const std::chrono::time_point<Clock, std::chrono::duration<rep, period>> &src) {
+ return src.time_since_epoch();
+ }
+
+ static handle cast(const type &src, return_value_policy /* policy */, handle /* parent */) {
+ using namespace std::chrono;
+
+ // Use overloaded function to get our duration from our source
+ // Works out if it is a duration or time_point and get the duration
+ auto d = get_duration(src);
+
+ // Lazy initialise the PyDateTime import
+ if (!PyDateTimeAPI) { PyDateTime_IMPORT; }
+
+ // Declare these special duration types so the conversions happen with the correct primitive types (int)
+ using dd_t = duration<int, std::ratio<86400>>;
+ using ss_t = duration<int, std::ratio<1>>;
+ using us_t = duration<int, std::micro>;
+
+ auto dd = duration_cast<dd_t>(d);
+ auto subd = d - dd;
+ auto ss = duration_cast<ss_t>(subd);
+ auto us = duration_cast<us_t>(subd - ss);
+ return PyDelta_FromDSU(dd.count(), ss.count(), us.count());
+ }
+
+ PYBIND11_TYPE_CASTER(type, _("datetime.timedelta"));
+};
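+
+// Illustrative usage sketch (not part of the original header): a bound
+// function taking a std::chrono duration accepts either a datetime.timedelta
+// or a plain float interpreted as seconds, per the loader above:
+//
+//     m.def("wait", [](std::chrono::duration<double> d) { return d.count(); });
+//     // Python: wait(datetime.timedelta(seconds=2)) or wait(2.0) -> 2.0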
+
+// This is for casting times on the system clock into datetime.datetime instances
+template <typename Duration> class type_caster<std::chrono::time_point<std::chrono::system_clock, Duration>> {
+public:
+ using type = std::chrono::time_point<std::chrono::system_clock, Duration>;
+ bool load(handle src, bool) {
+ using namespace std::chrono;
+
+ // Lazy initialise the PyDateTime import
+ if (!PyDateTimeAPI) { PyDateTime_IMPORT; }
+
+ if (!src) return false;
+
+ std::tm cal;
+ microseconds msecs;
+
+ if (PyDateTime_Check(src.ptr())) {
+ cal.tm_sec = PyDateTime_DATE_GET_SECOND(src.ptr());
+ cal.tm_min = PyDateTime_DATE_GET_MINUTE(src.ptr());
+ cal.tm_hour = PyDateTime_DATE_GET_HOUR(src.ptr());
+ cal.tm_mday = PyDateTime_GET_DAY(src.ptr());
+ cal.tm_mon = PyDateTime_GET_MONTH(src.ptr()) - 1;
+ cal.tm_year = PyDateTime_GET_YEAR(src.ptr()) - 1900;
+ cal.tm_isdst = -1;
+ msecs = microseconds(PyDateTime_DATE_GET_MICROSECOND(src.ptr()));
+ } else if (PyDate_Check(src.ptr())) {
+ cal.tm_sec = 0;
+ cal.tm_min = 0;
+ cal.tm_hour = 0;
+ cal.tm_mday = PyDateTime_GET_DAY(src.ptr());
+ cal.tm_mon = PyDateTime_GET_MONTH(src.ptr()) - 1;
+ cal.tm_year = PyDateTime_GET_YEAR(src.ptr()) - 1900;
+ cal.tm_isdst = -1;
+ msecs = microseconds(0);
+ } else if (PyTime_Check(src.ptr())) {
+ cal.tm_sec = PyDateTime_TIME_GET_SECOND(src.ptr());
+ cal.tm_min = PyDateTime_TIME_GET_MINUTE(src.ptr());
+ cal.tm_hour = PyDateTime_TIME_GET_HOUR(src.ptr());
+ cal.tm_mday = 1; // This date (day, month, year) = (1, 0, 70)
+ cal.tm_mon = 0; // represents 1-Jan-1970, which is the
+ cal.tm_year = 70; // earliest available date for Python's datetime
+ cal.tm_isdst = -1;
+ msecs = microseconds(PyDateTime_TIME_GET_MICROSECOND(src.ptr()));
+ }
+ else return false;
+
+ value = time_point_cast<Duration>(system_clock::from_time_t(std::mktime(&cal)) + msecs);
+ return true;
+ }
+
+ static handle cast(const std::chrono::time_point<std::chrono::system_clock, Duration> &src, return_value_policy /* policy */, handle /* parent */) {
+ using namespace std::chrono;
+
+ // Lazy initialise the PyDateTime import
+ if (!PyDateTimeAPI) { PyDateTime_IMPORT; }
+
+ // Extract the microseconds and make sure they are positive, to avoid a bug in eastern-hemisphere time zones
+ // (cfr. https://github.com/pybind/pybind11/issues/2417)
+ using us_t = duration<int, std::micro>;
+ auto us = duration_cast<us_t>(src.time_since_epoch() % seconds(1));
+ if (us.count() < 0)
+ us += seconds(1);
+
+ // Subtract microseconds BEFORE `system_clock::to_time_t`, because:
+ // > If std::time_t has lower precision, it is implementation-defined whether the value is rounded or truncated.
+ // (https://en.cppreference.com/w/cpp/chrono/system_clock/to_time_t)
+ std::time_t tt = system_clock::to_time_t(time_point_cast<system_clock::duration>(src - us));
+ // std::localtime returns a pointer to static storage, so copy the result out immediately;
+ // other code using localtime (not just Python code) could otherwise overwrite it
+ std::tm localtime = *std::localtime(&tt);
+
+ return PyDateTime_FromDateAndTime(localtime.tm_year + 1900,
+ localtime.tm_mon + 1,
+ localtime.tm_mday,
+ localtime.tm_hour,
+ localtime.tm_min,
+ localtime.tm_sec,
+ us.count());
+ }
+ PYBIND11_TYPE_CASTER(type, _("datetime.datetime"));
+};
+
+// Clocks other than the system clock are not tied to calendar time, so their time points
+// cannot be represented as datetime.datetime objects. Instead we convert them to timedeltas;
+// if a float is passed in, it is interpreted as a number of seconds and converted as well.
+template <typename Clock, typename Duration> class type_caster<std::chrono::time_point<Clock, Duration>>
+: public duration_caster<std::chrono::time_point<Clock, Duration>> {
+};
+
+template <typename Rep, typename Period> class type_caster<std::chrono::duration<Rep, Period>>
+: public duration_caster<std::chrono::duration<Rep, Period>> {
+};
+
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+#include "detail/common.h"
+#warning "Including 'common.h' is deprecated. It will be removed in v3.0. Use 'pybind11.h'."
--- /dev/null
+/*
+ pybind11/complex.h: Complex number support
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "pybind11.h"
+#include <complex>
+
+/// glibc defines I as a macro which breaks things, e.g., boost template names
+#ifdef I
+# undef I
+#endif
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+
+template <typename T> struct format_descriptor<std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>> {
+ static constexpr const char c = format_descriptor<T>::c;
+ static constexpr const char value[3] = { 'Z', c, '\0' };
+ static std::string format() { return std::string(value); }
+};
+
+#ifndef PYBIND11_CPP17
+
+template <typename T> constexpr const char format_descriptor<
+ std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>>::value[3];
+
+#endif
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+template <typename T> struct is_fmt_numeric<std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>> {
+ static constexpr bool value = true;
+ static constexpr int index = is_fmt_numeric<T>::index + 3;
+};
+
+template <typename T> class type_caster<std::complex<T>> {
+public:
+ bool load(handle src, bool convert) {
+ if (!src)
+ return false;
+ if (!convert && !PyComplex_Check(src.ptr()))
+ return false;
+ Py_complex result = PyComplex_AsCComplex(src.ptr());
+ if (result.real == -1.0 && PyErr_Occurred()) {
+ PyErr_Clear();
+ return false;
+ }
+ value = std::complex<T>((T) result.real, (T) result.imag);
+ return true;
+ }
+
+ static handle cast(const std::complex<T> &src, return_value_policy /* policy */, handle /* parent */) {
+ return PyComplex_FromDoubles((double) src.real(), (double) src.imag());
+ }
+
+ PYBIND11_TYPE_CASTER(std::complex<T>, _("complex"));
+};
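+
+// Illustrative usage sketch (not part of the original header): with this
+// caster available, bound functions exchange std::complex with Python's
+// built-in complex type directly:
+//
+//     m.def("conj", [](std::complex<double> c) { return std::conj(c); });
+//     // Python: conj(1 + 2j) -> (1 - 2j)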
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+SET(HEADERS
+ class.h
+ common.h
+ descr.h
+ init.h
+ internals.h
+ typeid.h
+)
+
+install(FILES ${HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dune/python/pybind11/detail)
--- /dev/null
+/*
+ pybind11/detail/class.h: Python C API implementation details for py::class_
+
+ Copyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "../attr.h"
+#include "../options.h"
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+#if PY_VERSION_HEX >= 0x03030000 && !defined(PYPY_VERSION)
+# define PYBIND11_BUILTIN_QUALNAME
+# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj)
+#else
+// In pre-3.3 Python, we still set __qualname__ so that we can produce reliable function type
+// signatures; in 3.3+ this macro expands to nothing:
+# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj) setattr((PyObject *) obj, "__qualname__", nameobj)
+#endif
+
+inline std::string get_fully_qualified_tp_name(PyTypeObject *type) {
+#if !defined(PYPY_VERSION)
+ return type->tp_name;
+#else
+ auto module_name = handle((PyObject *) type).attr("__module__").cast<std::string>();
+ if (module_name == PYBIND11_BUILTINS_MODULE)
+ return type->tp_name;
+ else
+ return std::move(module_name) + "." + type->tp_name;
+#endif
+}
+
+inline PyTypeObject *type_incref(PyTypeObject *type) {
+ Py_INCREF(type);
+ return type;
+}
+
+#if !defined(PYPY_VERSION)
+
+/// `pybind11_static_property.__get__()`: Always pass the class instead of the instance.
+extern "C" inline PyObject *pybind11_static_get(PyObject *self, PyObject * /*ob*/, PyObject *cls) {
+ return PyProperty_Type.tp_descr_get(self, cls, cls);
+}
+
+/// `pybind11_static_property.__set__()`: Just like the above `__get__()`.
+extern "C" inline int pybind11_static_set(PyObject *self, PyObject *obj, PyObject *value) {
+ PyObject *cls = PyType_Check(obj) ? obj : (PyObject *) Py_TYPE(obj);
+ return PyProperty_Type.tp_descr_set(self, cls, value);
+}
+
+/** A `static_property` is the same as a `property` but the `__get__()` and `__set__()`
+ methods are modified to always use the object type instead of a concrete instance.
+ Return value: New reference. */
+inline PyTypeObject *make_static_property_type() {
+ constexpr auto *name = "pybind11_static_property";
+ auto name_obj = reinterpret_steal<object>(PYBIND11_FROM_STRING(name));
+
+ /* Danger zone: from now (and until PyType_Ready), make sure to
+ issue no Python C API calls which could potentially invoke the
+ garbage collector (the GC will call type_traverse(), which will in
+ turn find the newly constructed type in an invalid state) */
+ auto heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0);
+ if (!heap_type)
+ pybind11_fail("make_static_property_type(): error allocating type!");
+
+ heap_type->ht_name = name_obj.inc_ref().ptr();
+#ifdef PYBIND11_BUILTIN_QUALNAME
+ heap_type->ht_qualname = name_obj.inc_ref().ptr();
+#endif
+
+ auto type = &heap_type->ht_type;
+ type->tp_name = name;
+ type->tp_base = type_incref(&PyProperty_Type);
+ type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
+ type->tp_descr_get = pybind11_static_get;
+ type->tp_descr_set = pybind11_static_set;
+
+ if (PyType_Ready(type) < 0)
+ pybind11_fail("make_static_property_type(): failure in PyType_Ready()!");
+
+ setattr((PyObject *) type, "__module__", str("pybind11_builtins"));
+ PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);
+
+ return type;
+}
+
+#else // PYPY
+
+/** PyPy has some issues with the above C API, so we evaluate Python code instead.
+ This function will only be called once so performance isn't really a concern.
+ Return value: New reference. */
+inline PyTypeObject *make_static_property_type() {
+ auto d = dict();
+ PyObject *result = PyRun_String(R"(\
+ class pybind11_static_property(property):
+ def __get__(self, obj, cls):
+ return property.__get__(self, cls, cls)
+
+ def __set__(self, obj, value):
+ cls = obj if isinstance(obj, type) else type(obj)
+ property.__set__(self, cls, value)
+ )", Py_file_input, d.ptr(), d.ptr()
+ );
+ if (result == nullptr)
+ throw error_already_set();
+ Py_DECREF(result);
+ return (PyTypeObject *) d["pybind11_static_property"].cast<object>().release().ptr();
+}
+
+#endif // PYPY
+
+/** Types with static properties need to handle `Type.static_prop = x` in a specific way.
+ By default, Python replaces the `static_property` itself, but for wrapped C++ types
+ we need to call `static_property.__set__()` in order to propagate the new value to
+ the underlying C++ data structure. */
+extern "C" inline int pybind11_meta_setattro(PyObject* obj, PyObject* name, PyObject* value) {
+ // Use `_PyType_Lookup()` instead of `PyObject_GetAttr()` in order to get the raw
+ // descriptor (`property`) instead of calling `tp_descr_get` (`property.__get__()`).
+ PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name);
+
+ // The following assignment combinations are possible:
+ // 1. `Type.static_prop = value` --> descr_set: `Type.static_prop.__set__(value)`
+ // 2. `Type.static_prop = other_static_prop` --> setattro: replace existing `static_prop`
+ // 3. `Type.regular_attribute = value` --> setattro: regular attribute assignment
+ const auto static_prop = (PyObject *) get_internals().static_property_type;
+ const auto call_descr_set = descr && value && PyObject_IsInstance(descr, static_prop)
+ && !PyObject_IsInstance(value, static_prop);
+ if (call_descr_set) {
+ // Call `static_property.__set__()` instead of replacing the `static_property`.
+#if !defined(PYPY_VERSION)
+ return Py_TYPE(descr)->tp_descr_set(descr, obj, value);
+#else
+ if (PyObject *result = PyObject_CallMethod(descr, "__set__", "OO", obj, value)) {
+ Py_DECREF(result);
+ return 0;
+ } else {
+ return -1;
+ }
+#endif
+ } else {
+ // Replace existing attribute.
+ return PyType_Type.tp_setattro(obj, name, value);
+ }
+}
+
+#if PY_MAJOR_VERSION >= 3
+/**
+ * Python 3's PyInstanceMethod_Type hides itself via its tp_descr_get, which prevents aliasing
+ * methods via cls.attr("m2") = cls.attr("m1"): instead the tp_descr_get returns a plain function,
+ * when called on a class, or a PyMethod, when called on an instance. Override that behaviour here
+ * to do a special case bypass for PyInstanceMethod_Types.
+ */
+extern "C" inline PyObject *pybind11_meta_getattro(PyObject *obj, PyObject *name) {
+ PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name);
+ if (descr && PyInstanceMethod_Check(descr)) {
+ Py_INCREF(descr);
+ return descr;
+ }
+ else {
+ return PyType_Type.tp_getattro(obj, name);
+ }
+}
+#endif
+
+/// metaclass `__call__` function that is used to create all pybind11 objects.
+extern "C" inline PyObject *pybind11_meta_call(PyObject *type, PyObject *args, PyObject *kwargs) {
+
+ // use the default metaclass call to create/initialize the object
+ PyObject *self = PyType_Type.tp_call(type, args, kwargs);
+ if (self == nullptr) {
+ return nullptr;
+ }
+
+ // This must be a pybind11 instance
+ auto instance = reinterpret_cast<detail::instance *>(self);
+
+ // Ensure that the base __init__ function(s) were called
+ for (const auto &vh : values_and_holders(instance)) {
+ if (!vh.holder_constructed()) {
+ PyErr_Format(PyExc_TypeError, "%.200s.__init__() must be called when overriding __init__",
+ get_fully_qualified_tp_name(vh.type->type).c_str());
+ Py_DECREF(self);
+ return nullptr;
+ }
+ }
+
+ return self;
+}
+
+/// Cleanup the type-info for a pybind11-registered type.
+extern "C" inline void pybind11_meta_dealloc(PyObject *obj) {
+ auto *type = (PyTypeObject *) obj;
+ auto &internals = get_internals();
+
+ // A pybind11-registered type will:
+ // 1) be found in internals.registered_types_py
+ // 2) have exactly one associated `detail::type_info`
+ auto found_type = internals.registered_types_py.find(type);
+ if (found_type != internals.registered_types_py.end() &&
+ found_type->second.size() == 1 &&
+ found_type->second[0]->type == type) {
+
+ auto *tinfo = found_type->second[0];
+ auto tindex = std::type_index(*tinfo->cpptype);
+ internals.direct_conversions.erase(tindex);
+
+ if (tinfo->module_local)
+ registered_local_types_cpp().erase(tindex);
+ else
+ internals.registered_types_cpp.erase(tindex);
+ internals.registered_types_py.erase(tinfo->type);
+
+ // Actually just `std::erase_if`, but that's only available in C++20
+ auto &cache = internals.inactive_override_cache;
+ for (auto it = cache.begin(), last = cache.end(); it != last; ) {
+ if (it->first == (PyObject *) tinfo->type)
+ it = cache.erase(it);
+ else
+ ++it;
+ }
+
+ delete tinfo;
+ }
+
+ PyType_Type.tp_dealloc(obj);
+}
+
+/** This metaclass is assigned by default to all pybind11 types and is required in order
+ for static properties to function correctly. Users may override this using `py::metaclass`.
+ Return value: New reference. */
+inline PyTypeObject* make_default_metaclass() {
+ constexpr auto *name = "pybind11_type";
+ auto name_obj = reinterpret_steal<object>(PYBIND11_FROM_STRING(name));
+
+ /* Danger zone: from now (and until PyType_Ready), make sure to
+ issue no Python C API calls which could potentially invoke the
+ garbage collector (the GC will call type_traverse(), which will in
+ turn find the newly constructed type in an invalid state) */
+ auto heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0);
+ if (!heap_type)
+ pybind11_fail("make_default_metaclass(): error allocating metaclass!");
+
+ heap_type->ht_name = name_obj.inc_ref().ptr();
+#ifdef PYBIND11_BUILTIN_QUALNAME
+ heap_type->ht_qualname = name_obj.inc_ref().ptr();
+#endif
+
+ auto type = &heap_type->ht_type;
+ type->tp_name = name;
+ type->tp_base = type_incref(&PyType_Type);
+ type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
+
+ type->tp_call = pybind11_meta_call;
+
+ type->tp_setattro = pybind11_meta_setattro;
+#if PY_MAJOR_VERSION >= 3
+ type->tp_getattro = pybind11_meta_getattro;
+#endif
+
+ type->tp_dealloc = pybind11_meta_dealloc;
+
+ if (PyType_Ready(type) < 0)
+ pybind11_fail("make_default_metaclass(): failure in PyType_Ready()!");
+
+ setattr((PyObject *) type, "__module__", str("pybind11_builtins"));
+ PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);
+
+ return type;
+}
+
+/// For multiple inheritance types we need to recursively register/deregister base pointers for any
+/// base classes with pointers that are different from the instance value pointer so that we can
+/// correctly recognize an offset base class pointer. This calls a function with any offset base ptrs.
+inline void traverse_offset_bases(void *valueptr, const detail::type_info *tinfo, instance *self,
+ bool (*f)(void * /*parentptr*/, instance * /*self*/)) {
+ for (handle h : reinterpret_borrow<tuple>(tinfo->type->tp_bases)) {
+ if (auto parent_tinfo = get_type_info((PyTypeObject *) h.ptr())) {
+ for (auto &c : parent_tinfo->implicit_casts) {
+ if (c.first == tinfo->cpptype) {
+ auto *parentptr = c.second(valueptr);
+ if (parentptr != valueptr)
+ f(parentptr, self);
+ traverse_offset_bases(parentptr, parent_tinfo, self, f);
+ break;
+ }
+ }
+ }
+ }
+}
+
+inline bool register_instance_impl(void *ptr, instance *self) {
+ get_internals().registered_instances.emplace(ptr, self);
+ return true; // unused, but gives the same signature as the deregister func
+}
+inline bool deregister_instance_impl(void *ptr, instance *self) {
+ auto ®istered_instances = get_internals().registered_instances;
+ auto range = registered_instances.equal_range(ptr);
+ for (auto it = range.first; it != range.second; ++it) {
+ if (self == it->second) {
+ registered_instances.erase(it);
+ return true;
+ }
+ }
+ return false;
+}
+
+inline void register_instance(instance *self, void *valptr, const type_info *tinfo) {
+ register_instance_impl(valptr, self);
+ if (!tinfo->simple_ancestors)
+ traverse_offset_bases(valptr, tinfo, self, register_instance_impl);
+}
+
+inline bool deregister_instance(instance *self, void *valptr, const type_info *tinfo) {
+ bool ret = deregister_instance_impl(valptr, self);
+ if (!tinfo->simple_ancestors)
+ traverse_offset_bases(valptr, tinfo, self, deregister_instance_impl);
+ return ret;
+}
+
+/// Instance creation function for all pybind11 types. It allocates the internal instance layout for
+/// holding C++ objects and holders. Allocation is done lazily (the first time the instance is cast
+/// to a reference or pointer), and initialization is done by an `__init__` function.
+inline PyObject *make_new_instance(PyTypeObject *type) {
+#if defined(PYPY_VERSION)
+ // PyPy gets tp_basicsize wrong (issue 2482) under multiple inheritance when the first inherited
+ // object is a plain Python type (i.e. not derived from an extension type). Fix it.
+ ssize_t instance_size = static_cast<ssize_t>(sizeof(instance));
+ if (type->tp_basicsize < instance_size) {
+ type->tp_basicsize = instance_size;
+ }
+#endif
+ PyObject *self = type->tp_alloc(type, 0);
+ auto inst = reinterpret_cast<instance *>(self);
+ // Allocate the value/holder internals:
+ inst->allocate_layout();
+
+ inst->owned = true;
+
+ return self;
+}
+
+/// Instance creation function for all pybind11 types. It only allocates space for the
+/// C++ object, but doesn't call the constructor -- an `__init__` function must do that.
+extern "C" inline PyObject *pybind11_object_new(PyTypeObject *type, PyObject *, PyObject *) {
+ return make_new_instance(type);
+}
+
+/// An `__init__` function constructs the C++ object. Users should provide at least one
+/// of these using `py::init` or directly with `.def("__init__", ...)`. Otherwise, the
+/// following default function will be used which simply throws an exception.
+extern "C" inline int pybind11_object_init(PyObject *self, PyObject *, PyObject *) {
+ PyTypeObject *type = Py_TYPE(self);
+ std::string msg = get_fully_qualified_tp_name(type) + ": No constructor defined!";
+ PyErr_SetString(PyExc_TypeError, msg.c_str());
+ return -1;
+}
+
+inline void add_patient(PyObject *nurse, PyObject *patient) {
+ auto &internals = get_internals();
+ auto instance = reinterpret_cast<detail::instance *>(nurse);
+ instance->has_patients = true;
+ Py_INCREF(patient);
+ internals.patients[nurse].push_back(patient);
+}
+
+inline void clear_patients(PyObject *self) {
+ auto instance = reinterpret_cast<detail::instance *>(self);
+ auto &internals = get_internals();
+ auto pos = internals.patients.find(self);
+ assert(pos != internals.patients.end());
+ // Clearing the patients can cause more Python code to run, which
+ // can invalidate the iterator. Extract the vector of patients
+ // from the unordered_map first.
+ auto patients = std::move(pos->second);
+ internals.patients.erase(pos);
+ instance->has_patients = false;
+ for (PyObject *&patient : patients)
+ Py_CLEAR(patient);
+}
+
+/// Clears all internal data from the instance and removes it from registered instances in
+/// preparation for deallocation.
+inline void clear_instance(PyObject *self) {
+ auto instance = reinterpret_cast<detail::instance *>(self);
+
+ // Deallocate any values/holders, if present:
+ for (auto &v_h : values_and_holders(instance)) {
+ if (v_h) {
+
+ // We have to deregister before we call dealloc because, for virtual MI types, we still
+ // need to be able to get the parent pointers.
+ if (v_h.instance_registered() && !deregister_instance(instance, v_h.value_ptr(), v_h.type))
+ pybind11_fail("pybind11_object_dealloc(): Tried to deallocate unregistered instance!");
+
+ if (instance->owned || v_h.holder_constructed())
+ v_h.type->dealloc(v_h);
+ }
+ }
+ // Deallocate the value/holder layout internals:
+ instance->deallocate_layout();
+
+ if (instance->weakrefs)
+ PyObject_ClearWeakRefs(self);
+
+ PyObject **dict_ptr = _PyObject_GetDictPtr(self);
+ if (dict_ptr)
+ Py_CLEAR(*dict_ptr);
+
+ if (instance->has_patients)
+ clear_patients(self);
+}
+
+/// Instance destructor function for all pybind11 types. It calls `type_info.dealloc`
+/// to destroy the C++ object itself, while the rest is Python bookkeeping.
+extern "C" inline void pybind11_object_dealloc(PyObject *self) {
+ clear_instance(self);
+
+ auto type = Py_TYPE(self);
+ type->tp_free(self);
+
+#if PY_VERSION_HEX < 0x03080000
+ // `type->tp_dealloc != pybind11_object_dealloc` means that we're being called
+ // as part of a derived type's dealloc, in which case we're not allowed to decref
+ // the type here. For cross-module compatibility, we shouldn't compare directly
+ // with `pybind11_object_dealloc`, but with the common one stashed in internals.
+ auto pybind11_object_type = (PyTypeObject *) get_internals().instance_base;
+ if (type->tp_dealloc == pybind11_object_type->tp_dealloc)
+ Py_DECREF(type);
+#else
+ // This was not needed before Python 3.8 (Python issue 35810)
+ // https://github.com/pybind/pybind11/issues/1946
+ Py_DECREF(type);
+#endif
+}
+
+/** Create the type which can be used as a common base for all classes. This is
+ needed in order to satisfy Python's requirements for multiple inheritance.
+ Return value: New reference. */
+inline PyObject *make_object_base_type(PyTypeObject *metaclass) {
+ constexpr auto *name = "pybind11_object";
+ auto name_obj = reinterpret_steal<object>(PYBIND11_FROM_STRING(name));
+
+ /* Danger zone: from now (and until PyType_Ready), make sure to
+ issue no Python C API calls which could potentially invoke the
+ garbage collector (the GC will call type_traverse(), which will in
+ turn find the newly constructed type in an invalid state) */
+ auto heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0);
+ if (!heap_type)
+ pybind11_fail("make_object_base_type(): error allocating type!");
+
+ heap_type->ht_name = name_obj.inc_ref().ptr();
+#ifdef PYBIND11_BUILTIN_QUALNAME
+ heap_type->ht_qualname = name_obj.inc_ref().ptr();
+#endif
+
+ auto type = &heap_type->ht_type;
+ type->tp_name = name;
+ type->tp_base = type_incref(&PyBaseObject_Type);
+ type->tp_basicsize = static_cast<ssize_t>(sizeof(instance));
+ type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
+
+ type->tp_new = pybind11_object_new;
+ type->tp_init = pybind11_object_init;
+ type->tp_dealloc = pybind11_object_dealloc;
+
+ /* Support weak references (needed for the keep_alive feature) */
+ type->tp_weaklistoffset = offsetof(instance, weakrefs);
+
+ if (PyType_Ready(type) < 0)
+ pybind11_fail("PyType_Ready failed in make_object_base_type():" + error_string());
+
+ setattr((PyObject *) type, "__module__", str("pybind11_builtins"));
+ PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);
+
+ assert(!PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC));
+ return (PyObject *) heap_type;
+}
+
+/// dynamic_attr: Support for `d = instance.__dict__`.
+extern "C" inline PyObject *pybind11_get_dict(PyObject *self, void *) {
+ PyObject *&dict = *_PyObject_GetDictPtr(self);
+ if (!dict)
+ dict = PyDict_New();
+ Py_XINCREF(dict);
+ return dict;
+}
+
+/// dynamic_attr: Support for `instance.__dict__ = dict()`.
+extern "C" inline int pybind11_set_dict(PyObject *self, PyObject *new_dict, void *) {
+ if (!PyDict_Check(new_dict)) {
+ PyErr_Format(PyExc_TypeError, "__dict__ must be set to a dictionary, not a '%.200s'",
+ get_fully_qualified_tp_name(Py_TYPE(new_dict)).c_str());
+ return -1;
+ }
+ PyObject *&dict = *_PyObject_GetDictPtr(self);
+ Py_INCREF(new_dict);
+ Py_CLEAR(dict);
+ dict = new_dict;
+ return 0;
+}
+
+/// dynamic_attr: Allow the garbage collector to traverse the internal instance `__dict__`.
+extern "C" inline int pybind11_traverse(PyObject *self, visitproc visit, void *arg) {
+ PyObject *&dict = *_PyObject_GetDictPtr(self);
+ Py_VISIT(dict);
+ return 0;
+}
+
+/// dynamic_attr: Allow the GC to clear the dictionary.
+extern "C" inline int pybind11_clear(PyObject *self) {
+ PyObject *&dict = *_PyObject_GetDictPtr(self);
+ Py_CLEAR(dict);
+ return 0;
+}
+
+/// Give instances of this type a `__dict__` and opt into garbage collection.
+inline void enable_dynamic_attributes(PyHeapTypeObject *heap_type) {
+ auto type = &heap_type->ht_type;
+ type->tp_flags |= Py_TPFLAGS_HAVE_GC;
+ type->tp_dictoffset = type->tp_basicsize; // place dict at the end
+ type->tp_basicsize += (ssize_t)sizeof(PyObject *); // and allocate enough space for it
+ type->tp_traverse = pybind11_traverse;
+ type->tp_clear = pybind11_clear;
+
+ static PyGetSetDef getset[] = {
+ {const_cast<char*>("__dict__"), pybind11_get_dict, pybind11_set_dict, nullptr, nullptr},
+ {nullptr, nullptr, nullptr, nullptr, nullptr}
+ };
+ type->tp_getset = getset;
+}
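+
+// Illustrative usage sketch (not part of the original header; `Pet` is a
+// hypothetical bound type): this machinery backs the py::dynamic_attr()
+// class option,
+//
+//     py::class_<Pet>(m, "Pet", py::dynamic_attr()).def(py::init<>());
+//     // Python instances of Pet then accept arbitrary new attributes.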
+
+/// buffer_protocol: Fill in the view as specified by flags.
+extern "C" inline int pybind11_getbuffer(PyObject *obj, Py_buffer *view, int flags) {
+ // Look for a `get_buffer` implementation in this type's info or any bases (following MRO).
+ type_info *tinfo = nullptr;
+ for (auto type : reinterpret_borrow<tuple>(Py_TYPE(obj)->tp_mro)) {
+ tinfo = get_type_info((PyTypeObject *) type.ptr());
+ if (tinfo && tinfo->get_buffer)
+ break;
+ }
+ if (view == nullptr || !tinfo || !tinfo->get_buffer) {
+ if (view)
+ view->obj = nullptr;
+ PyErr_SetString(PyExc_BufferError, "pybind11_getbuffer(): Internal error");
+ return -1;
+ }
+ std::memset(view, 0, sizeof(Py_buffer));
+ buffer_info *info = tinfo->get_buffer(obj, tinfo->get_buffer_data);
+ view->obj = obj;
+ view->ndim = 1;
+ view->internal = info;
+ view->buf = info->ptr;
+ view->itemsize = info->itemsize;
+ view->len = view->itemsize;
+ for (auto s : info->shape)
+ view->len *= s;
+ view->readonly = info->readonly;
+ if ((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE && info->readonly) {
+ if (view)
+ view->obj = nullptr;
+ PyErr_SetString(PyExc_BufferError, "Writable buffer requested for readonly storage");
+ return -1;
+ }
+ if ((flags & PyBUF_FORMAT) == PyBUF_FORMAT)
+ view->format = const_cast<char *>(info->format.c_str());
+ if ((flags & PyBUF_STRIDES) == PyBUF_STRIDES) {
+ view->ndim = (int) info->ndim;
+ view->strides = &info->strides[0];
+ view->shape = &info->shape[0];
+ }
+ Py_INCREF(view->obj);
+ return 0;
+}
+
+/// buffer_protocol: Release the resources of the buffer.
+extern "C" inline void pybind11_releasebuffer(PyObject *, Py_buffer *view) {
+ delete (buffer_info *) view->internal;
+}
+
+/// Give this type a buffer interface.
+inline void enable_buffer_protocol(PyHeapTypeObject *heap_type) {
+ heap_type->ht_type.tp_as_buffer = &heap_type->as_buffer;
+#if PY_MAJOR_VERSION < 3
+ heap_type->ht_type.tp_flags |= Py_TPFLAGS_HAVE_NEWBUFFER;
+#endif
+
+ heap_type->as_buffer.bf_getbuffer = pybind11_getbuffer;
+ heap_type->as_buffer.bf_releasebuffer = pybind11_releasebuffer;
+}
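+
+// Illustrative usage sketch (not part of the original header; `Matrix` is a
+// hypothetical bound type): the buffer hooks above are activated via the
+// py::buffer_protocol() class option together with def_buffer():
+//
+//     py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
+//         .def_buffer([](Matrix &mat) -> py::buffer_info { /* ... */ });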
+
+/** Create a brand new Python type according to the `type_record` specification.
+ Return value: New reference. */
+inline PyObject* make_new_python_type(const type_record &rec) {
+ auto name = reinterpret_steal<object>(PYBIND11_FROM_STRING(rec.name));
+
+ auto qualname = name;
+ if (rec.scope && !PyModule_Check(rec.scope.ptr()) && hasattr(rec.scope, "__qualname__")) {
+#if PY_MAJOR_VERSION >= 3
+ qualname = reinterpret_steal<object>(
+ PyUnicode_FromFormat("%U.%U", rec.scope.attr("__qualname__").ptr(), name.ptr()));
+#else
+ qualname = str(rec.scope.attr("__qualname__").cast<std::string>() + "." + rec.name);
+#endif
+ }
+
+ object module_;
+ if (rec.scope) {
+ if (hasattr(rec.scope, "__module__"))
+ module_ = rec.scope.attr("__module__");
+ else if (hasattr(rec.scope, "__name__"))
+ module_ = rec.scope.attr("__name__");
+ }
+
+ auto full_name = c_str(
+#if !defined(PYPY_VERSION)
+ module_ ? str(module_).cast<std::string>() + "." + rec.name :
+#endif
+ rec.name);
+
+ char *tp_doc = nullptr;
+ if (rec.doc && options::show_user_defined_docstrings()) {
+ /* Allocate memory for docstring (using PyObject_MALLOC, since
+ Python will free this later on) */
+ size_t size = strlen(rec.doc) + 1;
+ tp_doc = (char *) PyObject_MALLOC(size);
+ memcpy((void *) tp_doc, rec.doc, size);
+ }
+
+ auto &internals = get_internals();
+ auto bases = tuple(rec.bases);
+ auto base = (bases.empty()) ? internals.instance_base
+ : bases[0].ptr();
+
+ /* Danger zone: from now (and until PyType_Ready), make sure to
+ issue no Python C API calls which could potentially invoke the
+ garbage collector (the GC will call type_traverse(), which will in
+ turn find the newly constructed type in an invalid state) */
+ auto metaclass = rec.metaclass.ptr() ? (PyTypeObject *) rec.metaclass.ptr()
+ : internals.default_metaclass;
+
+ auto heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0);
+ if (!heap_type)
+ pybind11_fail(std::string(rec.name) + ": Unable to create type object!");
+
+ heap_type->ht_name = name.release().ptr();
+#ifdef PYBIND11_BUILTIN_QUALNAME
+ heap_type->ht_qualname = qualname.inc_ref().ptr();
+#endif
+
+ auto type = &heap_type->ht_type;
+ type->tp_name = full_name;
+ type->tp_doc = tp_doc;
+ type->tp_base = type_incref((PyTypeObject *)base);
+ type->tp_basicsize = static_cast<ssize_t>(sizeof(instance));
+ if (!bases.empty())
+ type->tp_bases = bases.release().ptr();
+
+ /* Don't inherit base __init__ */
+ type->tp_init = pybind11_object_init;
+
+ /* Supported protocols */
+ type->tp_as_number = &heap_type->as_number;
+ type->tp_as_sequence = &heap_type->as_sequence;
+ type->tp_as_mapping = &heap_type->as_mapping;
+#if PY_VERSION_HEX >= 0x03050000
+ type->tp_as_async = &heap_type->as_async;
+#endif
+
+ /* Flags */
+ type->tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE;
+#if PY_MAJOR_VERSION < 3
+ type->tp_flags |= Py_TPFLAGS_CHECKTYPES;
+#endif
+ if (!rec.is_final)
+ type->tp_flags |= Py_TPFLAGS_BASETYPE;
+
+ if (rec.dynamic_attr)
+ enable_dynamic_attributes(heap_type);
+
+ if (rec.buffer_protocol)
+ enable_buffer_protocol(heap_type);
+
+ if (PyType_Ready(type) < 0)
+ pybind11_fail(std::string(rec.name) + ": PyType_Ready failed (" + error_string() + ")!");
+
+ assert(rec.dynamic_attr ? PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC)
+ : !PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC));
+
+ /* Register type with the parent scope */
+ if (rec.scope)
+ setattr(rec.scope, rec.name, (PyObject *) type);
+ else
+ Py_INCREF(type); // Keep it alive forever (reference leak)
+
+ if (module_) // Needed by pydoc
+ setattr((PyObject *) type, "__module__", module_);
+
+ PYBIND11_SET_OLDPY_QUALNAME(type, qualname);
+
+ return (PyObject *) type;
+}
+
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+/*
+ pybind11/detail/common.h -- Basic macros
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#define PYBIND11_VERSION_MAJOR 2
+#define PYBIND11_VERSION_MINOR 6
+#define PYBIND11_VERSION_PATCH 1
+
+#define PYBIND11_NAMESPACE_BEGIN(name) namespace name {
+#define PYBIND11_NAMESPACE_END(name) }
+
+// Robust support for some features and loading modules compiled against different pybind versions
+// requires forcing hidden visibility on pybind code, so we enforce this by setting the attribute on
+// the main `pybind11` namespace.
+#if !defined(PYBIND11_NAMESPACE)
+# ifdef __GNUG__
+# define PYBIND11_NAMESPACE pybind11 __attribute__((visibility("hidden")))
+# else
+# define PYBIND11_NAMESPACE pybind11
+# endif
+#endif
+
+#if !(defined(_MSC_VER) && __cplusplus == 199711L) && !defined(__INTEL_COMPILER)
+# if __cplusplus >= 201402L
+# define PYBIND11_CPP14
+# if __cplusplus >= 201703L
+# define PYBIND11_CPP17
+# endif
+# endif
+#elif defined(_MSC_VER) && __cplusplus == 199711L
+// MSVC sets _MSVC_LANG rather than __cplusplus (supposedly until the standard is fully implemented)
+// Unless you use the /Zc:__cplusplus flag on Visual Studio 2017 15.7 Preview 3 or newer
+# if _MSVC_LANG >= 201402L
+# define PYBIND11_CPP14
+# if _MSVC_LANG > 201402L && _MSC_VER >= 1910
+# define PYBIND11_CPP17
+# endif
+# endif
+#endif
+
+// Compiler version assertions
+#if defined(__INTEL_COMPILER)
+# if __INTEL_COMPILER < 1800
+# error pybind11 requires Intel C++ compiler v18 or newer
+# endif
+#elif defined(__clang__) && !defined(__apple_build_version__)
+# if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 3)
+# error pybind11 requires clang 3.3 or newer
+# endif
+#elif defined(__clang__)
+// Apple changes clang version macros to its Xcode version; the first Xcode release based on
+// (upstream) clang 3.3 was Xcode 5:
+# if __clang_major__ < 5
+# error pybind11 requires Xcode/clang 5.0 or newer
+# endif
+#elif defined(__GNUG__)
+# if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)
+# error pybind11 requires gcc 4.8 or newer
+# endif
+#elif defined(_MSC_VER)
+// Pybind hits various compiler bugs in 2015u2 and earlier, and also makes use of some stl features
+// (e.g. std::negation) added in 2015u3:
+# if _MSC_FULL_VER < 190024210
+# error pybind11 requires MSVC 2015 update 3 or newer
+# endif
+#endif
+
+#if !defined(PYBIND11_EXPORT)
+# if defined(WIN32) || defined(_WIN32)
+# define PYBIND11_EXPORT __declspec(dllexport)
+# else
+# define PYBIND11_EXPORT __attribute__ ((visibility("default")))
+# endif
+#endif
+
+#if defined(_MSC_VER)
+# define PYBIND11_NOINLINE __declspec(noinline)
+#else
+# define PYBIND11_NOINLINE __attribute__ ((noinline))
+#endif
+
+#if defined(PYBIND11_CPP14)
+# define PYBIND11_DEPRECATED(reason) [[deprecated(reason)]]
+#else
+# define PYBIND11_DEPRECATED(reason) __attribute__((deprecated(reason)))
+#endif
+
+#if defined(PYBIND11_CPP17)
+# define PYBIND11_MAYBE_UNUSED [[maybe_unused]]
+#elif defined(_MSC_VER) && !defined(__clang__)
+# define PYBIND11_MAYBE_UNUSED
+#else
+# define PYBIND11_MAYBE_UNUSED __attribute__ ((__unused__))
+#endif
+
+/* Don't let Python.h #define (v)snprintf as macros because they are implemented
+ properly in Visual Studio since 2015. */
+#if defined(_MSC_VER) && _MSC_VER >= 1900
+# define HAVE_SNPRINTF 1
+#endif
+
+/// Include Python header, disable linking to pythonX_d.lib on Windows in debug mode
+#if defined(_MSC_VER)
+# if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 4)
+# define HAVE_ROUND 1
+# endif
+# pragma warning(push)
+# pragma warning(disable: 4510 4610 4512 4005)
+# if defined(_DEBUG) && !defined(Py_DEBUG)
+# define PYBIND11_DEBUG_MARKER
+# undef _DEBUG
+# endif
+#endif
+
+#include <Python.h>
+#include <frameobject.h>
+#include <pythread.h>
+
+/* Python #defines overrides on all sorts of core functions, which
+ tends to wreak havoc in C++ codebases that expect these to work
+ like regular functions (potentially with several overloads) */
+#if defined(isalnum)
+# undef isalnum
+# undef isalpha
+# undef islower
+# undef isspace
+# undef isupper
+# undef tolower
+# undef toupper
+#endif
+
+#if defined(copysign)
+# undef copysign
+#endif
+
+#if defined(_MSC_VER)
+# if defined(PYBIND11_DEBUG_MARKER)
+# define _DEBUG
+# undef PYBIND11_DEBUG_MARKER
+# endif
+# pragma warning(pop)
+#endif
+
+#include <cstddef>
+#include <cstring>
+#include <forward_list>
+#include <vector>
+#include <string>
+#include <stdexcept>
+#include <exception>
+#include <unordered_set>
+#include <unordered_map>
+#include <memory>
+#include <typeindex>
+#include <type_traits>
+
+#if PY_MAJOR_VERSION >= 3 /// Compatibility macros for various Python versions
+#define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyInstanceMethod_New(ptr)
+#define PYBIND11_INSTANCE_METHOD_CHECK PyInstanceMethod_Check
+#define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyInstanceMethod_GET_FUNCTION
+#define PYBIND11_BYTES_CHECK PyBytes_Check
+#define PYBIND11_BYTES_FROM_STRING PyBytes_FromString
+#define PYBIND11_BYTES_FROM_STRING_AND_SIZE PyBytes_FromStringAndSize
+#define PYBIND11_BYTES_AS_STRING_AND_SIZE PyBytes_AsStringAndSize
+#define PYBIND11_BYTES_AS_STRING PyBytes_AsString
+#define PYBIND11_BYTES_SIZE PyBytes_Size
+#define PYBIND11_LONG_CHECK(o) PyLong_Check(o)
+#define PYBIND11_LONG_AS_LONGLONG(o) PyLong_AsLongLong(o)
+#define PYBIND11_LONG_FROM_SIGNED(o) PyLong_FromSsize_t((ssize_t) o)
+#define PYBIND11_LONG_FROM_UNSIGNED(o) PyLong_FromSize_t((size_t) o)
+#define PYBIND11_BYTES_NAME "bytes"
+#define PYBIND11_STRING_NAME "str"
+#define PYBIND11_SLICE_OBJECT PyObject
+#define PYBIND11_FROM_STRING PyUnicode_FromString
+#define PYBIND11_STR_TYPE ::pybind11::str
+#define PYBIND11_BOOL_ATTR "__bool__"
+#define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_bool)
+#define PYBIND11_BUILTINS_MODULE "builtins"
+// Providing a separate declaration to make Clang's -Wmissing-prototypes happy.
+// See comment for PYBIND11_MODULE below for why this is marked "maybe unused".
+#define PYBIND11_PLUGIN_IMPL(name) \
+ extern "C" PYBIND11_MAYBE_UNUSED PYBIND11_EXPORT PyObject *PyInit_##name(); \
+ extern "C" PYBIND11_EXPORT PyObject *PyInit_##name()
+
+#else
+#define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyMethod_New(ptr, nullptr, class_)
+#define PYBIND11_INSTANCE_METHOD_CHECK PyMethod_Check
+#define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyMethod_GET_FUNCTION
+#define PYBIND11_BYTES_CHECK PyString_Check
+#define PYBIND11_BYTES_FROM_STRING PyString_FromString
+#define PYBIND11_BYTES_FROM_STRING_AND_SIZE PyString_FromStringAndSize
+#define PYBIND11_BYTES_AS_STRING_AND_SIZE PyString_AsStringAndSize
+#define PYBIND11_BYTES_AS_STRING PyString_AsString
+#define PYBIND11_BYTES_SIZE PyString_Size
+#define PYBIND11_LONG_CHECK(o) (PyInt_Check(o) || PyLong_Check(o))
+#define PYBIND11_LONG_AS_LONGLONG(o) (PyInt_Check(o) ? (long long) PyLong_AsLong(o) : PyLong_AsLongLong(o))
+#define PYBIND11_LONG_FROM_SIGNED(o) PyInt_FromSsize_t((ssize_t) o) // Returns long if needed.
+#define PYBIND11_LONG_FROM_UNSIGNED(o) PyInt_FromSize_t((size_t) o) // Returns long if needed.
+#define PYBIND11_BYTES_NAME "str"
+#define PYBIND11_STRING_NAME "unicode"
+#define PYBIND11_SLICE_OBJECT PySliceObject
+#define PYBIND11_FROM_STRING PyString_FromString
+#define PYBIND11_STR_TYPE ::pybind11::bytes
+#define PYBIND11_BOOL_ATTR "__nonzero__"
+#define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_nonzero)
+#define PYBIND11_BUILTINS_MODULE "__builtin__"
+// Providing a separate PyInit decl to make Clang's -Wmissing-prototypes happy.
+// See comment for PYBIND11_MODULE below for why this is marked "maybe unused".
+#define PYBIND11_PLUGIN_IMPL(name) \
+ static PyObject *pybind11_init_wrapper(); \
+ extern "C" PYBIND11_MAYBE_UNUSED PYBIND11_EXPORT void init##name(); \
+ extern "C" PYBIND11_EXPORT void init##name() { \
+ (void)pybind11_init_wrapper(); \
+ } \
+ PyObject *pybind11_init_wrapper()
+#endif
+
+#if PY_VERSION_HEX >= 0x03050000 && PY_VERSION_HEX < 0x03050200
+extern "C" {
+ struct _Py_atomic_address { void *value; };
+ PyAPI_DATA(_Py_atomic_address) _PyThreadState_Current;
+}
+#endif
+
+#define PYBIND11_TRY_NEXT_OVERLOAD ((PyObject *) 1) // special failure return code
+#define PYBIND11_STRINGIFY(x) #x
+#define PYBIND11_TOSTRING(x) PYBIND11_STRINGIFY(x)
+#define PYBIND11_CONCAT(first, second) first##second
+#define PYBIND11_ENSURE_INTERNALS_READY \
+ pybind11::detail::get_internals();
+
+#define PYBIND11_CHECK_PYTHON_VERSION \
+ { \
+ const char *compiled_ver = PYBIND11_TOSTRING(PY_MAJOR_VERSION) \
+ "." PYBIND11_TOSTRING(PY_MINOR_VERSION); \
+ const char *runtime_ver = Py_GetVersion(); \
+ size_t len = std::strlen(compiled_ver); \
+ if (std::strncmp(runtime_ver, compiled_ver, len) != 0 \
+ || (runtime_ver[len] >= '0' && runtime_ver[len] <= '9')) { \
+ PyErr_Format(PyExc_ImportError, \
+ "Python version mismatch: module was compiled for Python %s, " \
+ "but the interpreter version is incompatible: %s.", \
+ compiled_ver, runtime_ver); \
+ return nullptr; \
+ } \
+ }
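+
+// Worked example (editor's note, illustrative only): if the extension was compiled against
+// Python 3.8, compiled_ver is "3.8" (length 3). A runtime string such as "3.9.1 (default, ...)"
+// fails the strncmp, and a hypothetical "3.80" would pass the strncmp but fail the
+// trailing-digit check, so both cases raise ImportError with the message above.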
+
+#define PYBIND11_CATCH_INIT_EXCEPTIONS \
+ catch (pybind11::error_already_set &e) { \
+ PyErr_SetString(PyExc_ImportError, e.what()); \
+ return nullptr; \
+ } catch (const std::exception &e) { \
+ PyErr_SetString(PyExc_ImportError, e.what()); \
+ return nullptr; \
+ } \
+
+/** \rst
+ ***Deprecated in favor of PYBIND11_MODULE***
+
+ This macro creates the entry point that will be invoked when the Python interpreter
+ imports a plugin library. Please create a `module_` in the function body and return
+ the pointer to its underlying Python object at the end.
+
+ .. code-block:: cpp
+
+ PYBIND11_PLUGIN(example) {
+ pybind11::module_ m("example", "pybind11 example plugin");
+ /// Set up bindings here
+ return m.ptr();
+ }
+\endrst */
+#define PYBIND11_PLUGIN(name) \
+ PYBIND11_DEPRECATED("PYBIND11_PLUGIN is deprecated, use PYBIND11_MODULE") \
+ static PyObject *pybind11_init(); \
+ PYBIND11_PLUGIN_IMPL(name) { \
+ PYBIND11_CHECK_PYTHON_VERSION \
+ PYBIND11_ENSURE_INTERNALS_READY \
+ try { \
+ return pybind11_init(); \
+ } PYBIND11_CATCH_INIT_EXCEPTIONS \
+ } \
+ PyObject *pybind11_init()
+
+/** \rst
+ This macro creates the entry point that will be invoked when the Python interpreter
+ imports an extension module. The module name is given as the first argument and it
+ should not be in quotes. The second macro argument defines a variable of type
+ `py::module_` which can be used to initialize the module.
+
+ The entry point is marked as "maybe unused" to aid dead-code detection analysis:
+ since the entry point is typically only looked up at runtime and not referenced
+ during translation, it would otherwise appear as unused ("dead") code.
+
+ .. code-block:: cpp
+
+ PYBIND11_MODULE(example, m) {
+ m.doc() = "pybind11 example module";
+
+ // Add bindings here
+ m.def("foo", []() {
+ return "Hello, World!";
+ });
+ }
+\endrst */
+#define PYBIND11_MODULE(name, variable) \
+ static ::pybind11::module_::module_def \
+ PYBIND11_CONCAT(pybind11_module_def_, name) PYBIND11_MAYBE_UNUSED; \
+ PYBIND11_MAYBE_UNUSED \
+ static void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ &); \
+ PYBIND11_PLUGIN_IMPL(name) { \
+ PYBIND11_CHECK_PYTHON_VERSION \
+ PYBIND11_ENSURE_INTERNALS_READY \
+ auto m = ::pybind11::module_::create_extension_module( \
+ PYBIND11_TOSTRING(name), nullptr, \
+ &PYBIND11_CONCAT(pybind11_module_def_, name)); \
+ try { \
+ PYBIND11_CONCAT(pybind11_init_, name)(m); \
+ return m.ptr(); \
+ } PYBIND11_CATCH_INIT_EXCEPTIONS \
+ } \
+ void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ &variable)
+
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+
+using ssize_t = Py_ssize_t;
+using size_t = std::size_t;
+
+/// Approach used to cast a previously unknown C++ instance into a Python object
+enum class return_value_policy : uint8_t {
+ /** This is the default return value policy, which falls back to the policy
+ return_value_policy::take_ownership when the return value is a pointer.
+ Otherwise, it uses return_value_policy::move or return_value_policy::copy for rvalue
+ and lvalue references, respectively. See below for a description of what
+ all of these different policies do. */
+ automatic = 0,
+
+ /** As above, but use policy return_value_policy::reference when the return
+ value is a pointer. This is the default conversion policy for function
+ arguments when calling Python functions manually from C++ code (i.e. via
+ handle::operator()). You probably won't need to use this. */
+ automatic_reference,
+
+ /** Reference an existing object (i.e. do not create a new copy) and take
+ ownership. Python will call the destructor and delete operator when the
+ object’s reference count reaches zero. Undefined behavior ensues when
+ the C++ side does the same. */
+ take_ownership,
+
+ /** Create a new copy of the returned object, which will be owned by
+ Python. This policy is comparably safe because the lifetimes of the two
+ instances are decoupled. */
+ copy,
+
+ /** Use std::move to move the return value contents into a new instance
+ that will be owned by Python. This policy is comparably safe because the
+ lifetimes of the two instances (move source and destination) are
+ decoupled. */
+ move,
+
+ /** Reference an existing object, but do not take ownership. The C++ side
+ is responsible for managing the object’s lifetime and deallocating it
+ when it is no longer used. Warning: undefined behavior will ensue when
+ the C++ side deletes an object that is still referenced and used by
+ Python. */
+ reference,
+
+ /** This policy only applies to methods and properties. It references the
+ object without taking ownership, similar to the above
+ return_value_policy::reference policy. In contrast to that policy, the
+ function or property’s implicit this argument (called the parent) is
+ considered to be the owner of the return value (the child).
+ pybind11 then couples the lifetime of the parent to the child via a
+ reference relationship that ensures that the parent cannot be garbage
+ collected while Python is still using the child. More advanced
+ variations of this scheme are also possible using combinations of
+ return_value_policy::reference and the keep_alive call policy */
+ reference_internal
+};
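+
+// Illustrative usage sketch (editor's note, not part of the upstream header): a policy is
+// normally passed as an extra argument to def(). For example, assuming a hypothetical free
+// function `Registry *get_registry()` whose result remains owned by the C++ side:
+//
+//     m.def("get_registry", &get_registry, pybind11::return_value_policy::reference);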
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+inline static constexpr int log2(size_t n, int k = 0) { return (n <= 1) ? k : log2(n >> 1, k + 1); }
+
+// Returns the size as a multiple of sizeof(void *), rounded up.
+inline static constexpr size_t size_in_ptrs(size_t s) { return 1 + ((s - 1) >> log2(sizeof(void *))); }
+
+/**
+ * The space to allocate for simple layout instance holders (see below) in multiples of the size of
+ * a pointer (e.g. 2 means 16 bytes on 64-bit architectures). The default is the minimum required
+ * to hold either a std::unique_ptr or std::shared_ptr (which is almost always
+ * sizeof(std::shared_ptr<T>)).
+ */
+constexpr size_t instance_simple_holder_in_ptrs() {
+ static_assert(sizeof(std::shared_ptr<int>) >= sizeof(std::unique_ptr<int>),
+ "pybind assumes std::shared_ptrs are at least as big as std::unique_ptrs");
+ return size_in_ptrs(sizeof(std::shared_ptr<int>));
+}
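+
+// Worked example (editor's note): on a typical 64-bit platform sizeof(void *) == 8 and
+// sizeof(std::shared_ptr<int>) == 16, so log2(8) == 3 and
+// size_in_ptrs(16) == 1 + ((16 - 1) >> 3) == 2; simple-layout instances therefore reserve
+// one value pointer plus two pointer-sized words of holder storage (see `instance` below).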
+
+// Forward declarations
+struct type_info;
+struct value_and_holder;
+
+struct nonsimple_values_and_holders {
+ void **values_and_holders;
+ uint8_t *status;
+};
+
+/// The 'instance' type which needs to be standard layout (need to be able to use 'offsetof')
+struct instance {
+ PyObject_HEAD
+ /// Storage for pointers and holder; see simple_layout, below, for a description
+ union {
+ void *simple_value_holder[1 + instance_simple_holder_in_ptrs()];
+ nonsimple_values_and_holders nonsimple;
+ };
+ /// Weak references
+ PyObject *weakrefs;
+ /// If true, the pointer is owned which means we're free to manage it with a holder.
+ bool owned : 1;
+ /**
+ * An instance has two possible value/holder layouts.
+ *
+ * Simple layout (when this flag is true) means the `simple_value_holder` is set with a pointer
+ * and the holder object governing that pointer, i.e. [val1*][holder]. This layout is applied
+ * whenever there is no python-side multiple inheritance of bound C++ types *and* the type's
+ * holder will fit in the default space (which is large enough to hold either a std::unique_ptr
+ * or std::shared_ptr).
+ *
+ * Non-simple layout applies when using custom holders that require more space than `shared_ptr`
+ * (which is typically the size of two pointers), or when multiple inheritance is used on the
+ * python side. Non-simple layout allocates the required amount of memory to have multiple
+ * bound C++ classes as parents. Under this layout, `nonsimple.values_and_holders` is set to a
+ * pointer to allocated space of the required size to hold a sequence of value pointers and
+ * holders, followed by `status`, a set of bit flags (1 byte each), i.e.
+ * [val1*][holder1][val2*][holder2]...[bb...] where each [block] is rounded up to a multiple of
+ * `sizeof(void *)`. `nonsimple.status` is, for convenience, a pointer to the
+ * beginning of the [bb...] block (but not independently allocated).
+ *
+ * Status bits indicate whether the associated holder is constructed (&
+ * status_holder_constructed) and whether the value pointer is registered (&
+ * status_instance_registered) in `registered_instances`.
+ */
+ bool simple_layout : 1;
+ /// For simple layout, tracks whether the holder has been constructed
+ bool simple_holder_constructed : 1;
+ /// For simple layout, tracks whether the instance is registered in `registered_instances`
+ bool simple_instance_registered : 1;
+ /// If true, get_internals().patients has an entry for this object
+ bool has_patients : 1;
+
+ /// Initializes all of the above type/values/holders data (but not the instance values themselves)
+ void allocate_layout();
+
+ /// Destroys/deallocates all of the above
+ void deallocate_layout();
+
+ /// Returns the value_and_holder wrapper for the given type (or the first, if `find_type`
+ /// omitted). Returns a default-constructed (with `.inst = nullptr`) object on failure if
+ /// `throw_if_missing` is false.
+ value_and_holder get_value_and_holder(const type_info *find_type = nullptr, bool throw_if_missing = true);
+
+ /// Bit values for the non-simple status flags
+ static constexpr uint8_t status_holder_constructed = 1;
+ static constexpr uint8_t status_instance_registered = 2;
+};
+
+static_assert(std::is_standard_layout<instance>::value, "Internal error: `pybind11::detail::instance` is not standard layout!");
+
+/// from __cpp_future__ import (convenient aliases from C++14/17)
+#if defined(PYBIND11_CPP14) && (!defined(_MSC_VER) || _MSC_VER >= 1910)
+using std::enable_if_t;
+using std::conditional_t;
+using std::remove_cv_t;
+using std::remove_reference_t;
+#else
+template <bool B, typename T = void> using enable_if_t = typename std::enable_if<B, T>::type;
+template <bool B, typename T, typename F> using conditional_t = typename std::conditional<B, T, F>::type;
+template <typename T> using remove_cv_t = typename std::remove_cv<T>::type;
+template <typename T> using remove_reference_t = typename std::remove_reference<T>::type;
+#endif
+
+/// Index sequences
+#if defined(PYBIND11_CPP14)
+using std::index_sequence;
+using std::make_index_sequence;
+#else
+template<size_t ...> struct index_sequence { };
+template<size_t N, size_t ...S> struct make_index_sequence_impl : make_index_sequence_impl <N - 1, N - 1, S...> { };
+template<size_t ...S> struct make_index_sequence_impl <0, S...> { using type = index_sequence<S...>; };
+template<size_t N> using make_index_sequence = typename make_index_sequence_impl<N>::type;
+#endif
+
+/// Make an index sequence of the indices of true arguments
+template <typename ISeq, size_t, bool...> struct select_indices_impl { using type = ISeq; };
+template <size_t... IPrev, size_t I, bool B, bool... Bs> struct select_indices_impl<index_sequence<IPrev...>, I, B, Bs...>
+ : select_indices_impl<conditional_t<B, index_sequence<IPrev..., I>, index_sequence<IPrev...>>, I + 1, Bs...> {};
+template <bool... Bs> using select_indices = typename select_indices_impl<index_sequence<>, 0, Bs...>::type;
+
+/// Backports of std::bool_constant and std::negation to accommodate older compilers
+template <bool B> using bool_constant = std::integral_constant<bool, B>;
+template <typename T> struct negation : bool_constant<!T::value> { };
+
+// PGI/Intel cannot detect operator delete with the "compatible" void_t impl, so
+// using the new one (C++14 defect, so generally works on newer compilers, even
+// if not in C++17 mode)
+#if defined(__PGIC__) || defined(__INTEL_COMPILER)
+template<typename... > using void_t = void;
+#else
+template <typename...> struct void_t_impl { using type = void; };
+template <typename... Ts> using void_t = typename void_t_impl<Ts...>::type;
+#endif
+
+
+/// Compile-time all/any/none of that check the boolean value of all template types
+#if defined(__cpp_fold_expressions) && !(defined(_MSC_VER) && (_MSC_VER < 1916))
+template <class... Ts> using all_of = bool_constant<(Ts::value && ...)>;
+template <class... Ts> using any_of = bool_constant<(Ts::value || ...)>;
+#elif !defined(_MSC_VER)
+template <bool...> struct bools {};
+template <class... Ts> using all_of = std::is_same<
+ bools<Ts::value..., true>,
+ bools<true, Ts::value...>>;
+template <class... Ts> using any_of = negation<all_of<negation<Ts>...>>;
+#else
+// MSVC has trouble with the above, but supports std::conjunction, which we can use instead (albeit
+// at a slight loss of compilation efficiency).
+template <class... Ts> using all_of = std::conjunction<Ts...>;
+template <class... Ts> using any_of = std::disjunction<Ts...>;
+#endif
+template <class... Ts> using none_of = negation<any_of<Ts...>>;
+
+template <class T, template<class> class... Predicates> using satisfies_all_of = all_of<Predicates<T>...>;
+template <class T, template<class> class... Predicates> using satisfies_any_of = any_of<Predicates<T>...>;
+template <class T, template<class> class... Predicates> using satisfies_none_of = none_of<Predicates<T>...>;
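+
+// Illustrative examples (editor's note): none_of<std::is_pointer<int>, std::is_reference<int>>::value
+// is true, and satisfies_any_of<int, std::is_integral, std::is_pointer>::value is true because
+// std::is_integral<int> holds.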
+
+/// Strip the class from a method type
+template <typename T> struct remove_class { };
+template <typename C, typename R, typename... A> struct remove_class<R (C::*)(A...)> { using type = R (A...); };
+template <typename C, typename R, typename... A> struct remove_class<R (C::*)(A...) const> { using type = R (A...); };
+
+/// Helper template to strip away type modifiers
+template <typename T> struct intrinsic_type { using type = T; };
+template <typename T> struct intrinsic_type<const T> { using type = typename intrinsic_type<T>::type; };
+template <typename T> struct intrinsic_type<T*> { using type = typename intrinsic_type<T>::type; };
+template <typename T> struct intrinsic_type<T&> { using type = typename intrinsic_type<T>::type; };
+template <typename T> struct intrinsic_type<T&&> { using type = typename intrinsic_type<T>::type; };
+template <typename T, size_t N> struct intrinsic_type<const T[N]> { using type = typename intrinsic_type<T>::type; };
+template <typename T, size_t N> struct intrinsic_type<T[N]> { using type = typename intrinsic_type<T>::type; };
+template <typename T> using intrinsic_t = typename intrinsic_type<T>::type;
+
+/// Helper type to replace 'void' in some expressions
+struct void_type { };
+
+/// Helper template which holds a list of types
+template <typename...> struct type_list { };
+
+/// Compile-time integer sum
+#ifdef __cpp_fold_expressions
+template <typename... Ts> constexpr size_t constexpr_sum(Ts... ns) { return (0 + ... + size_t{ns}); }
+#else
+constexpr size_t constexpr_sum() { return 0; }
+template <typename T, typename... Ts>
+constexpr size_t constexpr_sum(T n, Ts... ns) { return size_t{n} + constexpr_sum(ns...); }
+#endif
+
+PYBIND11_NAMESPACE_BEGIN(constexpr_impl)
+/// Implementation details for constexpr functions
+constexpr int first(int i) { return i; }
+template <typename T, typename... Ts>
+constexpr int first(int i, T v, Ts... vs) { return v ? i : first(i + 1, vs...); }
+
+constexpr int last(int /*i*/, int result) { return result; }
+template <typename T, typename... Ts>
+constexpr int last(int i, int result, T v, Ts... vs) { return last(i + 1, v ? i : result, vs...); }
+PYBIND11_NAMESPACE_END(constexpr_impl)
+
+/// Return the index of the first type in Ts which satisfies Predicate<T>. Returns sizeof...(Ts) if
+/// none match.
+template <template<typename> class Predicate, typename... Ts>
+constexpr int constexpr_first() { return constexpr_impl::first(0, Predicate<Ts>::value...); }
+
+/// Return the index of the last type in Ts which satisfies Predicate<T>, or -1 if none match.
+template <template<typename> class Predicate, typename... Ts>
+constexpr int constexpr_last() { return constexpr_impl::last(0, -1, Predicate<Ts>::value...); }
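+
+// Illustrative example (editor's note): constexpr_first<std::is_integral, float, int, double>() == 1
+// and constexpr_last<std::is_integral, int, float, long>() == 2.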
+
+/// Return the Nth element from the parameter pack
+template <size_t N, typename T, typename... Ts>
+struct pack_element { using type = typename pack_element<N - 1, Ts...>::type; };
+template <typename T, typename... Ts>
+struct pack_element<0, T, Ts...> { using type = T; };
+
+/// Return the one and only type which matches the predicate, or Default if none match.
+/// If more than one type matches the predicate, fail at compile-time.
+template <template<typename> class Predicate, typename Default, typename... Ts>
+struct exactly_one {
+ static constexpr auto found = constexpr_sum(Predicate<Ts>::value...);
+ static_assert(found <= 1, "Found more than one type matching the predicate");
+
+ static constexpr auto index = found ? constexpr_first<Predicate, Ts...>() : 0;
+ using type = conditional_t<found, typename pack_element<index, Ts...>::type, Default>;
+};
+template <template<typename> class P, typename Default>
+struct exactly_one<P, Default> { using type = Default; };
+
+template <template<typename> class Predicate, typename Default, typename... Ts>
+using exactly_one_t = typename exactly_one<Predicate, Default, Ts...>::type;
+
+/// Defer the evaluation of type T until types Us are instantiated
+template <typename T, typename... /*Us*/> struct deferred_type { using type = T; };
+template <typename T, typename... Us> using deferred_t = typename deferred_type<T, Us...>::type;
+
+/// Like is_base_of, but requires a strict base (i.e. `is_strict_base_of<T, T>::value == false`,
+/// unlike `std::is_base_of`)
+template <typename Base, typename Derived> using is_strict_base_of = bool_constant<
+ std::is_base_of<Base, Derived>::value && !std::is_same<Base, Derived>::value>;
+
+/// Like is_base_of, but also requires that the base type is accessible (i.e. that a Derived pointer
+/// can be converted to a Base pointer)
+/// For unions, `is_base_of<T, T>::value` is False, so we need to check `is_same` as well.
+template <typename Base, typename Derived> using is_accessible_base_of = bool_constant<
+ (std::is_same<Base, Derived>::value || std::is_base_of<Base, Derived>::value) && std::is_convertible<Derived *, Base *>::value>;
+
+template <template<typename...> class Base>
+struct is_template_base_of_impl {
+ template <typename... Us> static std::true_type check(Base<Us...> *);
+ static std::false_type check(...);
+};
+
+/// Check if a template is the base of a type. For example:
+/// `is_template_base_of<Base, T>` is true if `struct T : Base<U> {}` where U can be anything
+template <template<typename...> class Base, typename T>
+#if !defined(_MSC_VER)
+using is_template_base_of = decltype(is_template_base_of_impl<Base>::check((intrinsic_t<T>*)nullptr));
+#else // MSVC2015 has trouble with decltype in template aliases
+struct is_template_base_of : decltype(is_template_base_of_impl<Base>::check((intrinsic_t<T>*)nullptr)) { };
+#endif
+
+/// Check if T is an instantiation of the template `Class`. For example:
+/// `is_instantiation<shared_ptr, T>` is true if `T == shared_ptr<U>` where U can be anything.
+template <template<typename...> class Class, typename T>
+struct is_instantiation : std::false_type { };
+template <template<typename...> class Class, typename... Us>
+struct is_instantiation<Class, Class<Us...>> : std::true_type { };
+
+/// Check if T is std::shared_ptr<U> where U can be anything
+template <typename T> using is_shared_ptr = is_instantiation<std::shared_ptr, T>;
+
+/// Check if T looks like an input iterator
+template <typename T, typename = void> struct is_input_iterator : std::false_type {};
+template <typename T>
+struct is_input_iterator<T, void_t<decltype(*std::declval<T &>()), decltype(++std::declval<T &>())>>
+ : std::true_type {};
+
+template <typename T> using is_function_pointer = bool_constant<
+ std::is_pointer<T>::value && std::is_function<typename std::remove_pointer<T>::type>::value>;
+
+template <typename F> struct strip_function_object {
+ using type = typename remove_class<decltype(&F::operator())>::type;
+};
+
+// Extracts the function signature from a function, function pointer or lambda.
+template <typename Function, typename F = remove_reference_t<Function>>
+using function_signature_t = conditional_t<
+ std::is_function<F>::value,
+ F,
+ typename conditional_t<
+ std::is_pointer<F>::value || std::is_member_pointer<F>::value,
+ std::remove_pointer<F>,
+ strip_function_object<F>
+ >::type
+>;
+
+/// Returns true if the type looks like a lambda: that is, isn't a function, pointer or member
+/// pointer. Note that this can catch all sorts of other things, too; this is intended to be used
+/// in a place where passing a lambda makes sense.
+template <typename T> using is_lambda = satisfies_none_of<remove_reference_t<T>,
+ std::is_function, std::is_pointer, std::is_member_pointer>;
+
+/// Ignore that a variable is unused in compiler warnings
+inline void ignore_unused(const int *) { }
+
+/// Apply a function over each element of a parameter pack
+#ifdef __cpp_fold_expressions
+#define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) (((PATTERN), void()), ...)
+#else
+using expand_side_effects = bool[];
+#define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) (void)pybind11::detail::expand_side_effects{ ((PATTERN), void(), false)..., false }
+#endif
+
+PYBIND11_NAMESPACE_END(detail)
+
+/// C++ bindings of builtin Python exceptions
+class builtin_exception : public std::runtime_error {
+public:
+ using std::runtime_error::runtime_error;
+ /// Set the error using the Python C API
+ virtual void set_error() const = 0;
+};
+
+#define PYBIND11_RUNTIME_EXCEPTION(name, type) \
+ class name : public builtin_exception { public: \
+ using builtin_exception::builtin_exception; \
+ name() : name("") { } \
+ void set_error() const override { PyErr_SetString(type, what()); } \
+ };
+
+PYBIND11_RUNTIME_EXCEPTION(stop_iteration, PyExc_StopIteration)
+PYBIND11_RUNTIME_EXCEPTION(index_error, PyExc_IndexError)
+PYBIND11_RUNTIME_EXCEPTION(key_error, PyExc_KeyError)
+PYBIND11_RUNTIME_EXCEPTION(value_error, PyExc_ValueError)
+PYBIND11_RUNTIME_EXCEPTION(type_error, PyExc_TypeError)
+PYBIND11_RUNTIME_EXCEPTION(buffer_error, PyExc_BufferError)
+PYBIND11_RUNTIME_EXCEPTION(import_error, PyExc_ImportError)
+PYBIND11_RUNTIME_EXCEPTION(cast_error, PyExc_RuntimeError) /// Thrown when pybind11::cast or handle::call fail due to a type casting error
+PYBIND11_RUNTIME_EXCEPTION(reference_cast_error, PyExc_RuntimeError) /// Used internally
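+
+// Illustrative usage sketch (editor's note, not part of the upstream header): these exception
+// types can be thrown from bound C++ code and are translated into the corresponding Python
+// exception when control returns to the interpreter, e.g.
+//
+//     if (i >= size) throw pybind11::index_error("index out of range");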
+
+[[noreturn]] PYBIND11_NOINLINE inline void pybind11_fail(const char *reason) { throw std::runtime_error(reason); }
+[[noreturn]] PYBIND11_NOINLINE inline void pybind11_fail(const std::string &reason) { throw std::runtime_error(reason); }
+
+template <typename T, typename SFINAE = void> struct format_descriptor { };
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+// Returns the index of the given type in the type char array below, and in the list in numpy.h
+// The order here is: bool; 8 ints ((signed,unsigned)x(8,16,32,64)bits); float,double,long double;
+// complex float,double,long double. Note that the long double types only participate when long
+// double is actually longer than double (it isn't under MSVC).
+// NB: not only the string below but also complex.h and numpy.h rely on this order.
+template <typename T, typename SFINAE = void> struct is_fmt_numeric { static constexpr bool value = false; };
+template <typename T> struct is_fmt_numeric<T, enable_if_t<std::is_arithmetic<T>::value>> {
+ static constexpr bool value = true;
+ static constexpr int index = std::is_same<T, bool>::value ? 0 : 1 + (
+ std::is_integral<T>::value ? detail::log2(sizeof(T))*2 + std::is_unsigned<T>::value : 8 + (
+ std::is_same<T, double>::value ? 1 : std::is_same<T, long double>::value ? 2 : 0));
+};
+PYBIND11_NAMESPACE_END(detail)
+
+template <typename T> struct format_descriptor<T, detail::enable_if_t<std::is_arithmetic<T>::value>> {
+ static constexpr const char c = "?bBhHiIqQfdg"[detail::is_fmt_numeric<T>::index];
+ static constexpr const char value[2] = { c, '\0' };
+ static std::string format() { return std::string(1, c); }
+};
+
+#if !defined(PYBIND11_CPP17)
+
+template <typename T> constexpr const char format_descriptor<
+ T, detail::enable_if_t<std::is_arithmetic<T>::value>>::value[2];
+
+#endif
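+
+// Worked example (editor's note): with the character table "?bBhHiIqQfdg" above,
+// is_fmt_numeric<bool>::index == 0 so format_descriptor<bool>::format() == "?",
+// is_fmt_numeric<std::int32_t>::index == 1 + log2(4)*2 + 0 == 5 giving "i", and
+// is_fmt_numeric<double>::index == 1 + 8 + 1 == 10 giving "d".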
+
+/// RAII wrapper that temporarily clears any Python error state
+struct error_scope {
+ PyObject *type, *value, *trace;
+ error_scope() { PyErr_Fetch(&type, &value, &trace); }
+ ~error_scope() { PyErr_Restore(type, value, trace); }
+};
+
+/// Dummy destructor wrapper that can be used to expose classes with a private destructor
+struct nodelete { template <typename T> void operator()(T*) { } };
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+template <typename... Args>
+struct overload_cast_impl {
+ constexpr overload_cast_impl() {}; // NOLINT(modernize-use-equals-default): MSVC 2015 needs this
+
+ template <typename Return>
+ constexpr auto operator()(Return (*pf)(Args...)) const noexcept
+ -> decltype(pf) { return pf; }
+
+ template <typename Return, typename Class>
+ constexpr auto operator()(Return (Class::*pmf)(Args...), std::false_type = {}) const noexcept
+ -> decltype(pmf) { return pmf; }
+
+ template <typename Return, typename Class>
+ constexpr auto operator()(Return (Class::*pmf)(Args...) const, std::true_type) const noexcept
+ -> decltype(pmf) { return pmf; }
+};
+PYBIND11_NAMESPACE_END(detail)
+
+// overload_cast requires variable templates: C++14
+#if defined(PYBIND11_CPP14)
+#define PYBIND11_OVERLOAD_CAST 1
+/// Syntax sugar for resolving overloaded function pointers:
+/// - regular: static_cast<Return (Class::*)(Arg0, Arg1, Arg2)>(&Class::func)
+/// - sweet: overload_cast<Arg0, Arg1, Arg2>(&Class::func)
+template <typename... Args>
+static constexpr detail::overload_cast_impl<Args...> overload_cast = {};
+// MSVC 2015 only accepts this particular initialization syntax for this variable template.
+#endif
+
+/// Const member function selector for overload_cast
+/// - regular: static_cast<Return (Class::*)(Arg) const>(&Class::func)
+/// - sweet: overload_cast<Arg>(&Class::func, const_)
+static constexpr auto const_ = std::true_type{};
+
+#if !defined(PYBIND11_CPP14) // no overload_cast: providing something that static_assert-fails:
+template <typename... Args> struct overload_cast {
+ static_assert(detail::deferred_t<std::false_type, Args...>::value,
+ "pybind11::overload_cast<...> requires compiling in C++14 mode");
+};
+#endif // overload_cast
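+
+// Illustrative usage sketch (editor's note, not part of the upstream header), assuming a
+// hypothetical class `Pet` with the overloads `void set(int)` and `std::string get() const`:
+//
+//     cls.def("set", pybind11::overload_cast<int>(&Pet::set));
+//     cls.def("get", pybind11::overload_cast<>(&Pet::get, pybind11::const_));
+//
+// These are equivalent to the static_cast spellings shown in the comments above.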
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+// Adaptor for converting arbitrary container arguments into a vector; implicitly convertible from
+// any standard container (or C-style array) supporting std::begin/std::end, any singleton
+// arithmetic type (if T is arithmetic), or explicitly constructible from an iterator pair.
+template <typename T>
+class any_container {
+ std::vector<T> v;
+public:
+ any_container() = default;
+
+ // Can construct from a pair of iterators
+ template <typename It, typename = enable_if_t<is_input_iterator<It>::value>>
+ any_container(It first, It last) : v(first, last) { }
+
+ // Implicit conversion constructor from any arbitrary container type with values convertible to T
+ template <typename Container, typename = enable_if_t<std::is_convertible<decltype(*std::begin(std::declval<const Container &>())), T>::value>>
+ any_container(const Container &c) : any_container(std::begin(c), std::end(c)) { }
+
+ // initializer_list's aren't deducible, so don't get matched by the above template; we need this
+ // to explicitly allow implicit conversion from one:
+ template <typename TIn, typename = enable_if_t<std::is_convertible<TIn, T>::value>>
+ any_container(const std::initializer_list<TIn> &c) : any_container(c.begin(), c.end()) { }
+
+ // Avoid copying if given an rvalue vector of the correct type.
+ any_container(std::vector<T> &&v) : v(std::move(v)) { }
+
+ // Moves the vector out of an rvalue any_container
+ operator std::vector<T> &&() && { return std::move(v); }
+
+ // Dereferencing obtains a reference to the underlying vector
+ std::vector<T> &operator*() { return v; }
+ const std::vector<T> &operator*() const { return v; }
+
+ // -> lets you call methods on the underlying vector
+ std::vector<T> *operator->() { return &v; }
+ const std::vector<T> *operator->() const { return &v; }
+};
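+
+// Illustrative usage sketch (editor's note): a hypothetical function taking a shape argument as
+// `any_container<pybind11::ssize_t>` accepts a brace-enclosed list, any standard container with
+// convertible values, or an iterator pair, e.g.
+//
+//     make_buffer({2, 3, 4});                       // via the initializer_list constructor
+//     std::vector<pybind11::ssize_t> v{2, 3, 4};
+//     make_buffer(v);                               // via the generic container constructor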
+
+// Forward-declaration; see detail/class.h
+std::string get_fully_qualified_tp_name(PyTypeObject*);
+
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+/*
+ pybind11/detail/descr.h: Helper type for concatenating type signatures at compile time
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "common.h"
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+#if !defined(_MSC_VER)
+# define PYBIND11_DESCR_CONSTEXPR static constexpr
+#else
+# define PYBIND11_DESCR_CONSTEXPR const
+#endif
+
+/* Concatenate type signatures at compile time */
+template <size_t N, typename... Ts>
+struct descr {
+ char text[N + 1];
+
+ constexpr descr() : text{'\0'} { }
+ constexpr descr(char const (&s)[N+1]) : descr(s, make_index_sequence<N>()) { }
+
+ template <size_t... Is>
+ constexpr descr(char const (&s)[N+1], index_sequence<Is...>) : text{s[Is]..., '\0'} { }
+
+ template <typename... Chars>
+ constexpr descr(char c, Chars... cs) : text{c, static_cast<char>(cs)..., '\0'} { }
+
+ static constexpr std::array<const std::type_info *, sizeof...(Ts) + 1> types() {
+ return {{&typeid(Ts)..., nullptr}};
+ }
+};
+
+template <size_t N1, size_t N2, typename... Ts1, typename... Ts2, size_t... Is1, size_t... Is2>
+constexpr descr<N1 + N2, Ts1..., Ts2...> plus_impl(const descr<N1, Ts1...> &a, const descr<N2, Ts2...> &b,
+ index_sequence<Is1...>, index_sequence<Is2...>) {
+ return {a.text[Is1]..., b.text[Is2]...};
+}
+
+template <size_t N1, size_t N2, typename... Ts1, typename... Ts2>
+constexpr descr<N1 + N2, Ts1..., Ts2...> operator+(const descr<N1, Ts1...> &a, const descr<N2, Ts2...> &b) {
+ return plus_impl(a, b, make_index_sequence<N1>(), make_index_sequence<N2>());
+}
+
+template <size_t N>
+constexpr descr<N - 1> _(char const(&text)[N]) { return descr<N - 1>(text); }
+constexpr descr<0> _(char const(&)[1]) { return {}; }
+
+template <size_t Rem, size_t... Digits> struct int_to_str : int_to_str<Rem/10, Rem%10, Digits...> { };
+template <size_t...Digits> struct int_to_str<0, Digits...> {
+ static constexpr auto digits = descr<sizeof...(Digits)>(('0' + Digits)...);
+};
+
+// Ternary description (like std::conditional)
+template <bool B, size_t N1, size_t N2>
+constexpr enable_if_t<B, descr<N1 - 1>> _(char const(&text1)[N1], char const(&)[N2]) {
+ return _(text1);
+}
+template <bool B, size_t N1, size_t N2>
+constexpr enable_if_t<!B, descr<N2 - 1>> _(char const(&)[N1], char const(&text2)[N2]) {
+ return _(text2);
+}
+
+template <bool B, typename T1, typename T2>
+constexpr enable_if_t<B, T1> _(const T1 &d, const T2 &) { return d; }
+template <bool B, typename T1, typename T2>
+constexpr enable_if_t<!B, T2> _(const T1 &, const T2 &d) { return d; }
+
+template <size_t Size> auto constexpr _() -> decltype(int_to_str<Size / 10, Size % 10>::digits) {
+ return int_to_str<Size / 10, Size % 10>::digits;
+}
+
+template <typename Type> constexpr descr<1, Type> _() { return {'%'}; }
+
+constexpr descr<0> concat() { return {}; }
+
+template <size_t N, typename... Ts>
+constexpr descr<N, Ts...> concat(const descr<N, Ts...> &descr) { return descr; }
+
+template <size_t N, typename... Ts, typename... Args>
+constexpr auto concat(const descr<N, Ts...> &d, const Args &...args)
+ -> decltype(std::declval<descr<N + 2, Ts...>>() + concat(args...)) {
+ return d + _(", ") + concat(args...);
+}
+
+template <size_t N, typename... Ts>
+constexpr descr<N + 2, Ts...> type_descr(const descr<N, Ts...> &descr) {
+ return _("{") + descr + _("}");
+}
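+
+// Illustrative compile-time example (editor's note): the helpers above concatenate fixed-size
+// character arrays, so
+//
+//     constexpr auto sig = type_descr(concat(_("int"), _("float")));
+//
+// produces a descr whose `text` member equals "{int, float}".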
+
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+/*
+ pybind11/detail/init.h: init factory function implementation and support code.
+
+ Copyright (c) 2017 Jason Rhinelander <jason@imaginary.ca>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "class.h"
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+template <>
+class type_caster<value_and_holder> {
+public:
+ bool load(handle h, bool) {
+ value = reinterpret_cast<value_and_holder *>(h.ptr());
+ return true;
+ }
+
+ template <typename> using cast_op_type = value_and_holder &;
+ operator value_and_holder &() { return *value; }
+ static constexpr auto name = _<value_and_holder>();
+
+private:
+ value_and_holder *value = nullptr;
+};
+
+PYBIND11_NAMESPACE_BEGIN(initimpl)
+
+inline void no_nullptr(void *ptr) {
+ if (!ptr) throw type_error("pybind11::init(): factory function returned nullptr");
+}
+
+// Implementing functions for all forms of py::init<...> and py::init(...)
+template <typename Class> using Cpp = typename Class::type;
+template <typename Class> using Alias = typename Class::type_alias;
+template <typename Class> using Holder = typename Class::holder_type;
+
+template <typename Class> using is_alias_constructible = std::is_constructible<Alias<Class>, Cpp<Class> &&>;
+
+// Takes a Cpp pointer and returns true if it actually is a polymorphic Alias instance.
+template <typename Class, enable_if_t<Class::has_alias, int> = 0>
+bool is_alias(Cpp<Class> *ptr) {
+ return dynamic_cast<Alias<Class> *>(ptr) != nullptr;
+}
+// Failing fallback version of the above for a no-alias class (always returns false)
+template <typename /*Class*/>
+constexpr bool is_alias(void *) { return false; }
+
+// Constructs and returns a new object; if the given arguments don't map to a constructor, we fall
+// back to brace aggregate initialization so that aggregate initialization can be used with
+// py::init, e.g. `py::init<int, int>` to initialize a `struct T { int a; int b; }`. For
+// non-aggregate types, we need to use an ordinary T(...) constructor (invoking as `T{...}` usually
+// works, but will not do the expected thing when `T` has an `initializer_list<T>` constructor).
+template <typename Class, typename... Args, detail::enable_if_t<std::is_constructible<Class, Args...>::value, int> = 0>
+inline Class *construct_or_initialize(Args &&...args) { return new Class(std::forward<Args>(args)...); }
+template <typename Class, typename... Args, detail::enable_if_t<!std::is_constructible<Class, Args...>::value, int> = 0>
+inline Class *construct_or_initialize(Args &&...args) { return new Class{std::forward<Args>(args)...}; }
+
+// Attempts to construct an alias using an `Alias(Cpp &&)` constructor. This allows types with
+// an alias to provide only a single Cpp factory function as long as the Alias can be
+// constructed from an rvalue reference of the base Cpp type. This means that Alias classes
+// can, when appropriate, simply define a `Alias(Cpp &&)` constructor rather than needing to
+// inherit all the base class constructors.
+template <typename Class>
+void construct_alias_from_cpp(std::true_type /*is_alias_constructible*/,
+ value_and_holder &v_h, Cpp<Class> &&base) {
+ v_h.value_ptr() = new Alias<Class>(std::move(base));
+}
+template <typename Class>
+[[noreturn]] void construct_alias_from_cpp(std::false_type /*!is_alias_constructible*/,
+ value_and_holder &, Cpp<Class> &&) {
+ throw type_error("pybind11::init(): unable to convert returned instance to required "
+ "alias class: no `Alias<Class>(Class &&)` constructor available");
+}
+
+// Error-generating fallback for factories that don't match one of the below construction
+// mechanisms.
+template <typename Class>
+void construct(...) {
+ static_assert(!std::is_same<Class, Class>::value /* always false */,
+ "pybind11::init(): init function must return a compatible pointer, "
+ "holder, or value");
+}
+
+// Pointer return v1: the factory function returns a class pointer for a registered class.
+// If we don't need an alias (because this class doesn't have one, or because the final type is
+// inherited on the Python side) we can simply take over ownership. Otherwise we need to try to
+// construct an Alias from the returned base instance.
+template <typename Class>
+void construct(value_and_holder &v_h, Cpp<Class> *ptr, bool need_alias) {
+ no_nullptr(ptr);
+ if (Class::has_alias && need_alias && !is_alias<Class>(ptr)) {
+ // We're going to try to construct an alias by moving the cpp type. Whether or not
+ // that succeeds, we still need to destroy the original cpp pointer (either the
+ // moved away leftover, if the alias construction works, or the value itself if we
+ // throw an error), but we can't just call `delete ptr`: it might have a special
+ // deleter, or might be shared_from_this. So we construct a holder around it as if
+ // it was a normal instance, then steal the holder away into a local variable; thus
+ // the holder's destruction happens when we leave the C++ scope, and the holder
+ // class gets to handle the destruction however it likes.
+ v_h.value_ptr() = ptr;
+ v_h.set_instance_registered(true); // To prevent init_instance from registering it
+ v_h.type->init_instance(v_h.inst, nullptr); // Set up the holder
+ Holder<Class> temp_holder(std::move(v_h.holder<Holder<Class>>())); // Steal the holder
+ v_h.type->dealloc(v_h); // Destroys the moved-out holder remains, resets value ptr to null
+ v_h.set_instance_registered(false);
+
+ construct_alias_from_cpp<Class>(is_alias_constructible<Class>{}, v_h, std::move(*ptr));
+ } else {
+ // Otherwise the type isn't inherited, so we don't need an Alias
+ v_h.value_ptr() = ptr;
+ }
+}
+
+// Pointer return v2: a factory that always returns an alias instance ptr. We simply take over
+// ownership of the pointer.
+template <typename Class, enable_if_t<Class::has_alias, int> = 0>
+void construct(value_and_holder &v_h, Alias<Class> *alias_ptr, bool) {
+ no_nullptr(alias_ptr);
+ v_h.value_ptr() = static_cast<Cpp<Class> *>(alias_ptr);
+}
+
+// Holder return: copy its pointer, and move or copy the returned holder into the new instance's
+// holder. This also handles types like std::shared_ptr<T> and std::unique_ptr<T> where T is a
+// derived type (through those holders' implicit conversion from derived class holder constructors).
+template <typename Class>
+void construct(value_and_holder &v_h, Holder<Class> holder, bool need_alias) {
+ auto *ptr = holder_helper<Holder<Class>>::get(holder);
+ no_nullptr(ptr);
+ // If we need an alias, check that the held pointer is actually an alias instance
+ if (Class::has_alias && need_alias && !is_alias<Class>(ptr))
+ throw type_error("pybind11::init(): construction failed: returned holder-wrapped instance "
+ "is not an alias instance");
+
+ v_h.value_ptr() = ptr;
+ v_h.type->init_instance(v_h.inst, &holder);
+}
+
+// return-by-value version 1: returning a cpp class by value. If the class has an alias and an
+// alias is required, the alias must have an `Alias(Cpp &&)` constructor so that we can construct
+// the alias from the base when needed (i.e. because of Python-side inheritance). When we don't
+// need it, we simply move-construct the cpp value into a new instance.
+template <typename Class>
+void construct(value_and_holder &v_h, Cpp<Class> &&result, bool need_alias) {
+ static_assert(std::is_move_constructible<Cpp<Class>>::value,
+ "pybind11::init() return-by-value factory function requires a movable class");
+ if (Class::has_alias && need_alias)
+ construct_alias_from_cpp<Class>(is_alias_constructible<Class>{}, v_h, std::move(result));
+ else
+ v_h.value_ptr() = new Cpp<Class>(std::move(result));
+}
+
+// return-by-value version 2: returning a value of the alias type itself. We move-construct an
+// Alias instance (even if no Python-side inheritance is involved). This is intended for
+// cases where Alias initialization is always desired.
+template <typename Class>
+void construct(value_and_holder &v_h, Alias<Class> &&result, bool) {
+ static_assert(std::is_move_constructible<Alias<Class>>::value,
+ "pybind11::init() return-by-alias-value factory function requires a movable alias class");
+ v_h.value_ptr() = new Alias<Class>(std::move(result));
+}
+
+// Implementing class for py::init<...>()
+template <typename... Args>
+struct constructor {
+ template <typename Class, typename... Extra, enable_if_t<!Class::has_alias, int> = 0>
+ static void execute(Class &cl, const Extra&... extra) {
+ cl.def("__init__", [](value_and_holder &v_h, Args... args) {
+ v_h.value_ptr() = construct_or_initialize<Cpp<Class>>(std::forward<Args>(args)...);
+ }, is_new_style_constructor(), extra...);
+ }
+
+ template <typename Class, typename... Extra,
+ enable_if_t<Class::has_alias &&
+ std::is_constructible<Cpp<Class>, Args...>::value, int> = 0>
+ static void execute(Class &cl, const Extra&... extra) {
+ cl.def("__init__", [](value_and_holder &v_h, Args... args) {
+ if (Py_TYPE(v_h.inst) == v_h.type->type)
+ v_h.value_ptr() = construct_or_initialize<Cpp<Class>>(std::forward<Args>(args)...);
+ else
+ v_h.value_ptr() = construct_or_initialize<Alias<Class>>(std::forward<Args>(args)...);
+ }, is_new_style_constructor(), extra...);
+ }
+
+ template <typename Class, typename... Extra,
+ enable_if_t<Class::has_alias &&
+ !std::is_constructible<Cpp<Class>, Args...>::value, int> = 0>
+ static void execute(Class &cl, const Extra&... extra) {
+ cl.def("__init__", [](value_and_holder &v_h, Args... args) {
+ v_h.value_ptr() = construct_or_initialize<Alias<Class>>(std::forward<Args>(args)...);
+ }, is_new_style_constructor(), extra...);
+ }
+};
+
+// Implementing class for py::init_alias<...>()
+template <typename... Args> struct alias_constructor {
+ template <typename Class, typename... Extra,
+ enable_if_t<Class::has_alias && std::is_constructible<Alias<Class>, Args...>::value, int> = 0>
+ static void execute(Class &cl, const Extra&... extra) {
+ cl.def("__init__", [](value_and_holder &v_h, Args... args) {
+ v_h.value_ptr() = construct_or_initialize<Alias<Class>>(std::forward<Args>(args)...);
+ }, is_new_style_constructor(), extra...);
+ }
+};
+
+// Implementation class for py::init(Func) and py::init(Func, AliasFunc)
+template <typename CFunc, typename AFunc = void_type (*)(),
+ typename = function_signature_t<CFunc>, typename = function_signature_t<AFunc>>
+struct factory;
+
+// Specialization for py::init(Func)
+template <typename Func, typename Return, typename... Args>
+struct factory<Func, void_type (*)(), Return(Args...)> {
+ remove_reference_t<Func> class_factory;
+
+ factory(Func &&f) : class_factory(std::forward<Func>(f)) { }
+
+ // The given class either has no alias or has no separate alias factory;
+ // this always constructs the class itself. If the class is registered with an alias
+ // type and an alias instance is needed (i.e. because the final type is a Python class
+ // inheriting from the C++ type) the returned value needs to either already be an alias
+ // instance, or the alias needs to be constructible from a `Class &&` argument.
+ template <typename Class, typename... Extra>
+ void execute(Class &cl, const Extra &...extra) && {
+ #if defined(PYBIND11_CPP14)
+ cl.def("__init__", [func = std::move(class_factory)]
+ #else
+ auto &func = class_factory;
+ cl.def("__init__", [func]
+ #endif
+ (value_and_holder &v_h, Args... args) {
+ construct<Class>(v_h, func(std::forward<Args>(args)...),
+ Py_TYPE(v_h.inst) != v_h.type->type);
+ }, is_new_style_constructor(), extra...);
+ }
+};
+
+// Specialization for py::init(Func, AliasFunc)
+template <typename CFunc, typename AFunc,
+ typename CReturn, typename... CArgs, typename AReturn, typename... AArgs>
+struct factory<CFunc, AFunc, CReturn(CArgs...), AReturn(AArgs...)> {
+ static_assert(sizeof...(CArgs) == sizeof...(AArgs),
+ "pybind11::init(class_factory, alias_factory): class and alias factories "
+ "must have identical argument signatures");
+ static_assert(all_of<std::is_same<CArgs, AArgs>...>::value,
+ "pybind11::init(class_factory, alias_factory): class and alias factories "
+ "must have identical argument signatures");
+
+ remove_reference_t<CFunc> class_factory;
+ remove_reference_t<AFunc> alias_factory;
+
+ factory(CFunc &&c, AFunc &&a)
+ : class_factory(std::forward<CFunc>(c)), alias_factory(std::forward<AFunc>(a)) { }
+
+ // The class factory is called when the `self` type passed to `__init__` is the direct
+ // class (i.e. not inherited), the alias factory when `self` is a Python-side subtype.
+ template <typename Class, typename... Extra>
+ void execute(Class &cl, const Extra&... extra) && {
+ static_assert(Class::has_alias, "The two-argument version of `py::init()` can "
+ "only be used if the class has an alias");
+ #if defined(PYBIND11_CPP14)
+ cl.def("__init__", [class_func = std::move(class_factory), alias_func = std::move(alias_factory)]
+ #else
+ auto &class_func = class_factory;
+ auto &alias_func = alias_factory;
+ cl.def("__init__", [class_func, alias_func]
+ #endif
+ (value_and_holder &v_h, CArgs... args) {
+ if (Py_TYPE(v_h.inst) == v_h.type->type)
+ // If the instance type equals the registered type we don't have inheritance, so
+ // don't need the alias and can construct using the class function:
+ construct<Class>(v_h, class_func(std::forward<CArgs>(args)...), false);
+ else
+ construct<Class>(v_h, alias_func(std::forward<CArgs>(args)...), true);
+ }, is_new_style_constructor(), extra...);
+ }
+};
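+
+// Illustrative binding-side usage sketch (editor's note, not part of the upstream header),
+// assuming a hypothetical module `m` and a class `Widget` constructible from int:
+//
+//     pybind11::class_<Widget>(m, "Widget")
+//         .def(pybind11::init<int>())                                              // handled by `constructor`
+//         .def(pybind11::init([](const std::string &s) { return Widget(std::stoi(s)); }));  // handled by `factory`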
+
+/// Set just the C++ state. Same as `__init__`.
+template <typename Class, typename T>
+void setstate(value_and_holder &v_h, T &&result, bool need_alias) {
+ construct<Class>(v_h, std::forward<T>(result), need_alias);
+}
+
+/// Set both the C++ and Python states
+template <typename Class, typename T, typename O,
+ enable_if_t<std::is_convertible<O, handle>::value, int> = 0>
+void setstate(value_and_holder &v_h, std::pair<T, O> &&result, bool need_alias) {
+ construct<Class>(v_h, std::move(result.first), need_alias);
+ setattr((PyObject *) v_h.inst, "__dict__", result.second);
+}
+
+/// Implementation for py::pickle(GetState, SetState)
+template <typename Get, typename Set,
+ typename = function_signature_t<Get>, typename = function_signature_t<Set>>
+struct pickle_factory;
+
+template <typename Get, typename Set,
+ typename RetState, typename Self, typename NewInstance, typename ArgState>
+struct pickle_factory<Get, Set, RetState(Self), NewInstance(ArgState)> {
+ static_assert(std::is_same<intrinsic_t<RetState>, intrinsic_t<ArgState>>::value,
+ "The type returned by `__getstate__` must be the same "
+ "as the argument accepted by `__setstate__`");
+
+ remove_reference_t<Get> get;
+ remove_reference_t<Set> set;
+
+ pickle_factory(Get get, Set set)
+ : get(std::forward<Get>(get)), set(std::forward<Set>(set)) { }
+
+ template <typename Class, typename... Extra>
+ void execute(Class &cl, const Extra &...extra) && {
+ cl.def("__getstate__", std::move(get));
+
+#if defined(PYBIND11_CPP14)
+ cl.def("__setstate__", [func = std::move(set)]
+#else
+ auto &func = set;
+ cl.def("__setstate__", [func]
+#endif
+ (value_and_holder &v_h, ArgState state) {
+ setstate<Class>(v_h, func(std::forward<ArgState>(state)),
+ Py_TYPE(v_h.inst) != v_h.type->type);
+ }, is_new_style_constructor(), extra...);
+ }
+};
+
+PYBIND11_NAMESPACE_END(initimpl)
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+/*
+ pybind11/detail/internals.h: Internal data structure and related functions
+
+ Copyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "../pytypes.h"
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+// Forward declarations
+inline PyTypeObject *make_static_property_type();
+inline PyTypeObject *make_default_metaclass();
+inline PyObject *make_object_base_type(PyTypeObject *metaclass);
+
+// The old Python Thread Local Storage (TLS) API is deprecated in Python 3.7 in favor of the new
+// Thread Specific Storage (TSS) API.
+#if PY_VERSION_HEX >= 0x03070000
+# define PYBIND11_TLS_KEY_INIT(var) Py_tss_t *var = nullptr
+# define PYBIND11_TLS_GET_VALUE(key) PyThread_tss_get((key))
+# define PYBIND11_TLS_REPLACE_VALUE(key, value) PyThread_tss_set((key), (value))
+# define PYBIND11_TLS_DELETE_VALUE(key) PyThread_tss_set((key), nullptr)
+# define PYBIND11_TLS_FREE(key) PyThread_tss_free(key)
+#else
+ // Usually an int but a long on Cygwin64 with Python 3.x
+# define PYBIND11_TLS_KEY_INIT(var) decltype(PyThread_create_key()) var = 0
+# define PYBIND11_TLS_GET_VALUE(key) PyThread_get_key_value((key))
+# if PY_MAJOR_VERSION < 3
+# define PYBIND11_TLS_DELETE_VALUE(key) \
+ PyThread_delete_key_value(key)
+# define PYBIND11_TLS_REPLACE_VALUE(key, value) \
+ do { \
+ PyThread_delete_key_value((key)); \
+ PyThread_set_key_value((key), (value)); \
+ } while (false)
+# else
+# define PYBIND11_TLS_DELETE_VALUE(key) \
+ PyThread_set_key_value((key), nullptr)
+# define PYBIND11_TLS_REPLACE_VALUE(key, value) \
+ PyThread_set_key_value((key), (value))
+# endif
+# define PYBIND11_TLS_FREE(key) (void)key
+#endif
+
+// Python loads modules by default with dlopen with the RTLD_LOCAL flag; under libc++ and possibly
+// other STLs, this means `typeid(A)` from one module won't equal `typeid(A)` from another module
+// even when `A` is the same, non-hidden-visibility type (e.g. from a common include). Under
+// libstdc++, this doesn't happen: equality and the type_index hash are based on the type name,
+// which works. If not under a known-good STL, provide our own name-based hash and equality
+// functions that use the type name.
+#if defined(__GLIBCXX__)
+inline bool same_type(const std::type_info &lhs, const std::type_info &rhs) { return lhs == rhs; }
+using type_hash = std::hash<std::type_index>;
+using type_equal_to = std::equal_to<std::type_index>;
+#else
+inline bool same_type(const std::type_info &lhs, const std::type_info &rhs) {
+ return lhs.name() == rhs.name() || std::strcmp(lhs.name(), rhs.name()) == 0;
+}
+
+struct type_hash {
+ size_t operator()(const std::type_index &t) const {
+ size_t hash = 5381;
+ const char *ptr = t.name();
+ while (auto c = static_cast<unsigned char>(*ptr++))
+ hash = (hash * 33) ^ c;
+ return hash;
+ }
+};
+
+struct type_equal_to {
+ bool operator()(const std::type_index &lhs, const std::type_index &rhs) const {
+ return lhs.name() == rhs.name() || std::strcmp(lhs.name(), rhs.name()) == 0;
+ }
+};
+#endif
+
+template <typename value_type>
+using type_map = std::unordered_map<std::type_index, value_type, type_hash, type_equal_to>;
+
+struct override_hash {
+ inline size_t operator()(const std::pair<const PyObject *, const char *>& v) const {
+ size_t value = std::hash<const void *>()(v.first);
+ value ^= std::hash<const void *>()(v.second) + 0x9e3779b9 + (value<<6) + (value>>2);
+ return value;
+ }
+};
+
+/// Internal data structure used to track registered instances and types.
+/// Whenever binary incompatible changes are made to this structure,
+/// `PYBIND11_INTERNALS_VERSION` must be incremented.
+struct internals {
+ type_map<type_info *> registered_types_cpp; // std::type_index -> pybind11's type information
+ std::unordered_map<PyTypeObject *, std::vector<type_info *>> registered_types_py; // PyTypeObject* -> base type_info(s)
+ std::unordered_multimap<const void *, instance*> registered_instances; // void * -> instance*
+ std::unordered_set<std::pair<const PyObject *, const char *>, override_hash> inactive_override_cache;
+ type_map<std::vector<bool (*)(PyObject *, void *&)>> direct_conversions;
+ std::unordered_map<const PyObject *, std::vector<PyObject *>> patients;
+ std::forward_list<void (*) (std::exception_ptr)> registered_exception_translators;
+ std::unordered_map<std::string, void *> shared_data; // Custom data to be shared across extensions
+ std::vector<PyObject *> loader_patient_stack; // Used by `loader_life_support`
+ std::forward_list<std::string> static_strings; // Stores the std::strings backing detail::c_str()
+ PyTypeObject *static_property_type;
+ PyTypeObject *default_metaclass;
+ PyObject *instance_base;
+#if defined(WITH_THREAD)
+ PYBIND11_TLS_KEY_INIT(tstate);
+ PyInterpreterState *istate = nullptr;
+ ~internals() {
+ // This destructor is called *after* Py_Finalize() in finalize_interpreter().
+ // That *SHOULD BE* fine. The following details what happens when PyThread_tss_free is called.
+ // PYBIND11_TLS_FREE is PyThread_tss_free on python 3.7+. On older python, it does nothing.
+ // PyThread_tss_free calls PyThread_tss_delete and PyMem_RawFree.
+ // PyThread_tss_delete just calls TlsFree (on Windows) or pthread_key_delete (on *NIX). Neither
+ // of those have anything to do with CPython internals.
+ // PyMem_RawFree *requires* that the `tstate` be allocated with the CPython allocator.
+ PYBIND11_TLS_FREE(tstate);
+ }
+#endif
+};
+
+/// Additional type information which does not fit into the PyTypeObject.
+/// Changes to this struct also require bumping `PYBIND11_INTERNALS_VERSION`.
+struct type_info {
+ PyTypeObject *type;
+ const std::type_info *cpptype;
+ size_t type_size, type_align, holder_size_in_ptrs;
+ void *(*operator_new)(size_t);
+ void (*init_instance)(instance *, const void *);
+ void (*dealloc)(value_and_holder &v_h);
+ std::vector<PyObject *(*)(PyObject *, PyTypeObject *)> implicit_conversions;
+ std::vector<std::pair<const std::type_info *, void *(*)(void *)>> implicit_casts;
+ std::vector<bool (*)(PyObject *, void *&)> *direct_conversions;
+ buffer_info *(*get_buffer)(PyObject *, void *) = nullptr;
+ void *get_buffer_data = nullptr;
+ void *(*module_local_load)(PyObject *, const type_info *) = nullptr;
+ /* A simple type never occurs as a (direct or indirect) parent
+ * of a class that makes use of multiple inheritance */
+ bool simple_type : 1;
+ /* True if there is no multiple inheritance in this type's inheritance tree */
+ bool simple_ancestors : 1;
+ /* for base vs derived holder_type checks */
+ bool default_holder : 1;
+ /* true if this is a type registered with py::module_local */
+ bool module_local : 1;
+};
+
+/// Tracks the `internals` and `type_info` ABI version independent of the main library version
+#define PYBIND11_INTERNALS_VERSION 4
+
+/// On MSVC, debug and release builds are not ABI-compatible!
+#if defined(_MSC_VER) && defined(_DEBUG)
+# define PYBIND11_BUILD_TYPE "_debug"
+#else
+# define PYBIND11_BUILD_TYPE ""
+#endif
+
+/// Let's assume that different compilers are ABI-incompatible.
+/// A user can manually set this string if they know their
+/// compiler is compatible.
+#ifndef PYBIND11_COMPILER_TYPE
+# if defined(_MSC_VER)
+# define PYBIND11_COMPILER_TYPE "_msvc"
+# elif defined(__INTEL_COMPILER)
+# define PYBIND11_COMPILER_TYPE "_icc"
+# elif defined(__clang__)
+# define PYBIND11_COMPILER_TYPE "_clang"
+# elif defined(__PGI)
+# define PYBIND11_COMPILER_TYPE "_pgi"
+# elif defined(__MINGW32__)
+# define PYBIND11_COMPILER_TYPE "_mingw"
+# elif defined(__CYGWIN__)
+# define PYBIND11_COMPILER_TYPE "_gcc_cygwin"
+# elif defined(__GNUC__)
+# define PYBIND11_COMPILER_TYPE "_gcc"
+# else
+# define PYBIND11_COMPILER_TYPE "_unknown"
+# endif
+#endif
+
+/// Also distinguish between standard library implementations
+#ifndef PYBIND11_STDLIB
+# if defined(_LIBCPP_VERSION)
+# define PYBIND11_STDLIB "_libcpp"
+# elif defined(__GLIBCXX__) || defined(__GLIBCPP__)
+# define PYBIND11_STDLIB "_libstdcpp"
+# else
+# define PYBIND11_STDLIB ""
+# endif
+#endif
+
+/// On Linux/OSX, changes in __GXX_ABI_VERSION indicate ABI incompatibility.
+#ifndef PYBIND11_BUILD_ABI
+# if defined(__GXX_ABI_VERSION)
+# define PYBIND11_BUILD_ABI "_cxxabi" PYBIND11_TOSTRING(__GXX_ABI_VERSION)
+# else
+# define PYBIND11_BUILD_ABI ""
+# endif
+#endif
+
+#ifndef PYBIND11_INTERNALS_KIND
+# if defined(WITH_THREAD)
+# define PYBIND11_INTERNALS_KIND ""
+# else
+# define PYBIND11_INTERNALS_KIND "_without_thread"
+# endif
+#endif
+
+#define PYBIND11_INTERNALS_ID "__pybind11_internals_v" \
+ PYBIND11_TOSTRING(PYBIND11_INTERNALS_VERSION) PYBIND11_INTERNALS_KIND PYBIND11_COMPILER_TYPE PYBIND11_STDLIB PYBIND11_BUILD_ABI PYBIND11_BUILD_TYPE "__"
+
+#define PYBIND11_MODULE_LOCAL_ID "__pybind11_module_local_v" \
+ PYBIND11_TOSTRING(PYBIND11_INTERNALS_VERSION) PYBIND11_INTERNALS_KIND PYBIND11_COMPILER_TYPE PYBIND11_STDLIB PYBIND11_BUILD_ABI PYBIND11_BUILD_TYPE "__"
+
+/// Each module locally stores a pointer to the `internals` data. The data
+/// itself is shared among modules with the same `PYBIND11_INTERNALS_ID`.
+inline internals **&get_internals_pp() {
+ static internals **internals_pp = nullptr;
+ return internals_pp;
+}
+
+inline void translate_exception(std::exception_ptr p) {
+ try {
+ if (p) std::rethrow_exception(p);
+ } catch (error_already_set &e) { e.restore(); return;
+ } catch (const builtin_exception &e) { e.set_error(); return;
+ } catch (const std::bad_alloc &e) { PyErr_SetString(PyExc_MemoryError, e.what()); return;
+ } catch (const std::domain_error &e) { PyErr_SetString(PyExc_ValueError, e.what()); return;
+ } catch (const std::invalid_argument &e) { PyErr_SetString(PyExc_ValueError, e.what()); return;
+ } catch (const std::length_error &e) { PyErr_SetString(PyExc_ValueError, e.what()); return;
+ } catch (const std::out_of_range &e) { PyErr_SetString(PyExc_IndexError, e.what()); return;
+ } catch (const std::range_error &e) { PyErr_SetString(PyExc_ValueError, e.what()); return;
+ } catch (const std::overflow_error &e) { PyErr_SetString(PyExc_OverflowError, e.what()); return;
+ } catch (const std::exception &e) { PyErr_SetString(PyExc_RuntimeError, e.what()); return;
+ } catch (...) {
+ PyErr_SetString(PyExc_RuntimeError, "Caught an unknown exception!");
+ return;
+ }
+}
+
+#if !defined(__GLIBCXX__)
+inline void translate_local_exception(std::exception_ptr p) {
+ try {
+ if (p) std::rethrow_exception(p);
+ } catch (error_already_set &e) { e.restore(); return;
+ } catch (const builtin_exception &e) { e.set_error(); return;
+ }
+}
+#endif
+
+/// Return a reference to the current `internals` data
+PYBIND11_NOINLINE inline internals &get_internals() {
+ auto **&internals_pp = get_internals_pp();
+ if (internals_pp && *internals_pp)
+ return **internals_pp;
+
+ // Ensure that the GIL is held since we will need to make Python calls.
+ // Cannot use py::gil_scoped_acquire here since that constructor calls get_internals.
+ struct gil_scoped_acquire_local {
+ gil_scoped_acquire_local() : state (PyGILState_Ensure()) {}
+ ~gil_scoped_acquire_local() { PyGILState_Release(state); }
+ const PyGILState_STATE state;
+ } gil;
+
+ constexpr auto *id = PYBIND11_INTERNALS_ID;
+ auto builtins = handle(PyEval_GetBuiltins());
+ if (builtins.contains(id) && isinstance<capsule>(builtins[id])) {
+ internals_pp = static_cast<internals **>(capsule(builtins[id]));
+
+ // We loaded the internals through Python's builtins, which means that our `error_already_set`
+ // and `builtin_exception` may be different local classes than the ones set up in the
+ // initial exception translator below, so add another translator for our local exception classes.
+ //
+ // libstdc++ doesn't require this (types there are identified only by name)
+#if !defined(__GLIBCXX__)
+ (*internals_pp)->registered_exception_translators.push_front(&translate_local_exception);
+#endif
+ } else {
+ if (!internals_pp) internals_pp = new internals*();
+ auto *&internals_ptr = *internals_pp;
+ internals_ptr = new internals();
+#if defined(WITH_THREAD)
+
+ #if PY_VERSION_HEX < 0x03090000
+ PyEval_InitThreads();
+ #endif
+ PyThreadState *tstate = PyThreadState_Get();
+ #if PY_VERSION_HEX >= 0x03070000
+ internals_ptr->tstate = PyThread_tss_alloc();
+ if (!internals_ptr->tstate || PyThread_tss_create(internals_ptr->tstate))
+ pybind11_fail("get_internals: could not successfully initialize the TSS key!");
+ PyThread_tss_set(internals_ptr->tstate, tstate);
+ #else
+ internals_ptr->tstate = PyThread_create_key();
+ if (internals_ptr->tstate == -1)
+ pybind11_fail("get_internals: could not successfully initialize the TLS key!");
+ PyThread_set_key_value(internals_ptr->tstate, tstate);
+ #endif
+ internals_ptr->istate = tstate->interp;
+#endif
+ builtins[id] = capsule(internals_pp);
+ internals_ptr->registered_exception_translators.push_front(&translate_exception);
+ internals_ptr->static_property_type = make_static_property_type();
+ internals_ptr->default_metaclass = make_default_metaclass();
+ internals_ptr->instance_base = make_object_base_type(internals_ptr->default_metaclass);
+ }
+ return **internals_pp;
+}
+
+/// Works like `internals.registered_types_cpp`, but for module-local registered types:
+inline type_map<type_info *> ®istered_local_types_cpp() {
+ static type_map<type_info *> locals{};
+ return locals;
+}
+
+/// Constructs a std::string with the given arguments, stores it in `internals`, and returns its
+/// `c_str()`. Such string objects have a long storage duration -- the internal strings are only
+/// cleared when the program exits or after interpreter shutdown (when embedding), and so are
+/// suitable for c-style strings needed by Python internals (such as PyTypeObject's tp_name).
+template <typename... Args>
+const char *c_str(Args &&...args) {
+ auto &strings = get_internals().static_strings;
+ strings.emplace_front(std::forward<Args>(args)...);
+ return strings.front().c_str();
+}
+
+PYBIND11_NAMESPACE_END(detail)
+
+/// Returns a named pointer that is shared among all extension modules (using the same
+/// pybind11 version) running in the current interpreter. Names starting with underscores
+/// are reserved for internal usage. Returns `nullptr` if no matching entry was found.
+inline PYBIND11_NOINLINE void *get_shared_data(const std::string &name) {
+ auto &internals = detail::get_internals();
+ auto it = internals.shared_data.find(name);
+ return it != internals.shared_data.end() ? it->second : nullptr;
+}
+
+/// Set the shared data that can be later recovered by `get_shared_data()`.
+inline PYBIND11_NOINLINE void *set_shared_data(const std::string &name, void *data) {
+ detail::get_internals().shared_data[name] = data;
+ return data;
+}
+
+/// Returns a typed reference to a shared data entry (by using `get_shared_data()`) if
+/// such entry exists. Otherwise, a new object of default-constructible type `T` is
+/// added to the shared data under the given name and a reference to it is returned.
+template<typename T>
+T &get_or_create_shared_data(const std::string &name) {
+ auto &internals = detail::get_internals();
+ auto it = internals.shared_data.find(name);
+ T *ptr = (T *) (it != internals.shared_data.end() ? it->second : nullptr);
+ if (!ptr) {
+ ptr = new T();
+ internals.shared_data[name] = ptr;
+ }
+ return *ptr;
+}
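+
+// Example (an illustrative sketch; the key "my_counter" is made up): two extension modules built
+// against the same pybind11 configuration can exchange data through this map:
+//
+//     // in module A
+//     py::set_shared_data("my_counter", new int(0));
+//     // in module B
+//     if (auto *counter = static_cast<int *>(py::get_shared_data("my_counter")))
+//         ++*counter;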
+
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+/*
+ pybind11/detail/typeid.h: Compiler-independent access to type identifiers
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include <cstdio>
+#include <cstdlib>
+
+#if defined(__GNUG__)
+#include <cxxabi.h>
+#endif
+
+#include "common.h"
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+/// Erase all occurrences of a substring
+inline void erase_all(std::string &string, const std::string &search) {
+ for (size_t pos = 0;;) {
+ pos = string.find(search, pos);
+ if (pos == std::string::npos) break;
+ string.erase(pos, search.length());
+ }
+}
+
+PYBIND11_NOINLINE inline void clean_type_id(std::string &name) {
+#if defined(__GNUG__)
+ int status = 0;
+ std::unique_ptr<char, void (*)(void *)> res {
+ abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status), std::free };
+ if (status == 0)
+ name = res.get();
+#else
+ detail::erase_all(name, "class ");
+ detail::erase_all(name, "struct ");
+ detail::erase_all(name, "enum ");
+#endif
+ detail::erase_all(name, "pybind11::");
+}
+PYBIND11_NAMESPACE_END(detail)
+
+/// Return a string representation of a C++ type
+template <typename T> static std::string type_id() {
+ std::string name(typeid(T).name());
+ detail::clean_type_id(name);
+ return name;
+}
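+
+// Example (illustrative): the returned name is demangled on GCC/Clang; on other compilers the
+// "class "/"struct "/"enum " prefixes are stripped instead, and any "pybind11::" qualifiers are
+// removed.
+//
+//     std::string name = py::type_id<std::runtime_error>();  // e.g. "std::runtime_error"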
+
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+/*
+ pybind11/eigen.h: Transparent conversion for dense and sparse Eigen matrices
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "numpy.h"
+
+#if defined(__INTEL_COMPILER)
+# pragma warning(disable: 1682) // implicit conversion of a 64-bit integral type to a smaller integral type (potential portability problem)
+#elif defined(__GNUG__) || defined(__clang__)
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wconversion"
+# pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+# ifdef __clang__
+// Eigen generates a bunch of implicit-copy-constructor-is-deprecated warnings with -Wdeprecated
+// under Clang, so disable that warning here:
+# pragma GCC diagnostic ignored "-Wdeprecated"
+# endif
+# if __GNUC__ >= 7
+# pragma GCC diagnostic ignored "-Wint-in-bool-context"
+# endif
+#endif
+
+#if defined(_MSC_VER)
+# pragma warning(push)
+# pragma warning(disable: 4127) // warning C4127: Conditional expression is constant
+# pragma warning(disable: 4996) // warning C4996: std::unary_negate is deprecated in C++17
+#endif
+
+#include <Eigen/Core>
+#include <Eigen/SparseCore>
+
+// Eigen prior to 3.2.7 doesn't have proper move constructors--but worse, some classes get implicit
+// move constructors that break things. We could detect this and explicitly copy, but an extra copy
+// of matrices seems highly undesirable.
+static_assert(EIGEN_VERSION_AT_LEAST(3,2,7), "Eigen support in pybind11 requires Eigen >= 3.2.7");
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+
+// Provide a convenience alias for easier pass-by-ref usage with fully dynamic strides:
+using EigenDStride = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
+template <typename MatrixType> using EigenDRef = Eigen::Ref<MatrixType, 0, EigenDStride>;
+template <typename MatrixType> using EigenDMap = Eigen::Map<MatrixType, 0, EigenDStride>;
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+#if EIGEN_VERSION_AT_LEAST(3,3,0)
+using EigenIndex = Eigen::Index;
+#else
+using EigenIndex = EIGEN_DEFAULT_DENSE_INDEX_TYPE;
+#endif
+
+// Matches Eigen::Map, Eigen::Ref, blocks, etc:
+template <typename T> using is_eigen_dense_map = all_of<is_template_base_of<Eigen::DenseBase, T>, std::is_base_of<Eigen::MapBase<T, Eigen::ReadOnlyAccessors>, T>>;
+template <typename T> using is_eigen_mutable_map = std::is_base_of<Eigen::MapBase<T, Eigen::WriteAccessors>, T>;
+template <typename T> using is_eigen_dense_plain = all_of<negation<is_eigen_dense_map<T>>, is_template_base_of<Eigen::PlainObjectBase, T>>;
+template <typename T> using is_eigen_sparse = is_template_base_of<Eigen::SparseMatrixBase, T>;
+// Test for objects inheriting from EigenBase<Derived> that aren't captured by the above. This
+// basically covers anything that can be assigned to a dense matrix but that doesn't have a typical
+// matrix data layout that can be copied from their .data(). For example, DiagonalMatrix and
+// SelfAdjointView fall into this category.
+template <typename T> using is_eigen_other = all_of<
+ is_template_base_of<Eigen::EigenBase, T>,
+ negation<any_of<is_eigen_dense_map<T>, is_eigen_dense_plain<T>, is_eigen_sparse<T>>>
+>;
+
+// Captures numpy/eigen conformability status (returned by EigenProps::conformable()):
+template <bool EigenRowMajor> struct EigenConformable {
+ bool conformable = false;
+ EigenIndex rows = 0, cols = 0;
+ EigenDStride stride{0, 0}; // Only valid if negativestrides is false!
+ bool negativestrides = false; // If true, do not use stride!
+
+ EigenConformable(bool fits = false) : conformable{fits} {}
+ // Matrix type:
+ EigenConformable(EigenIndex r, EigenIndex c,
+ EigenIndex rstride, EigenIndex cstride) :
+ conformable{true}, rows{r}, cols{c} {
+ // TODO: when Eigen bug #747 is fixed, remove the tests for non-negativity. http://eigen.tuxfamily.org/bz/show_bug.cgi?id=747
+ if (rstride < 0 || cstride < 0) {
+ negativestrides = true;
+ } else {
+ stride = {EigenRowMajor ? rstride : cstride /* outer stride */,
+ EigenRowMajor ? cstride : rstride /* inner stride */ };
+ }
+ }
+ // Vector type:
+ EigenConformable(EigenIndex r, EigenIndex c, EigenIndex stride)
+ : EigenConformable(r, c, r == 1 ? c*stride : stride, c == 1 ? r : r*stride) {}
+
+ template <typename props> bool stride_compatible() const {
+ // To have compatible strides, we need (on both dimensions) one of fully dynamic strides,
+ // matching strides, or a dimension size of 1 (in which case the stride value is irrelevant)
+ return
+ !negativestrides &&
+ (props::inner_stride == Eigen::Dynamic || props::inner_stride == stride.inner() ||
+ (EigenRowMajor ? cols : rows) == 1) &&
+ (props::outer_stride == Eigen::Dynamic || props::outer_stride == stride.outer() ||
+ (EigenRowMajor ? rows : cols) == 1);
+ }
+ operator bool() const { return conformable; }
+};
+
+template <typename Type> struct eigen_extract_stride { using type = Type; };
+template <typename PlainObjectType, int MapOptions, typename StrideType>
+struct eigen_extract_stride<Eigen::Map<PlainObjectType, MapOptions, StrideType>> { using type = StrideType; };
+template <typename PlainObjectType, int Options, typename StrideType>
+struct eigen_extract_stride<Eigen::Ref<PlainObjectType, Options, StrideType>> { using type = StrideType; };
+
+// Helper struct for extracting information from an Eigen type
+template <typename Type_> struct EigenProps {
+ using Type = Type_;
+ using Scalar = typename Type::Scalar;
+ using StrideType = typename eigen_extract_stride<Type>::type;
+ static constexpr EigenIndex
+ rows = Type::RowsAtCompileTime,
+ cols = Type::ColsAtCompileTime,
+ size = Type::SizeAtCompileTime;
+ static constexpr bool
+ row_major = Type::IsRowMajor,
+ vector = Type::IsVectorAtCompileTime, // At least one dimension has fixed size 1
+ fixed_rows = rows != Eigen::Dynamic,
+ fixed_cols = cols != Eigen::Dynamic,
+ fixed = size != Eigen::Dynamic, // Fully-fixed size
+ dynamic = !fixed_rows && !fixed_cols; // Fully-dynamic size
+
+ template <EigenIndex i, EigenIndex ifzero> using if_zero = std::integral_constant<EigenIndex, i == 0 ? ifzero : i>;
+ static constexpr EigenIndex inner_stride = if_zero<StrideType::InnerStrideAtCompileTime, 1>::value,
+ outer_stride = if_zero<StrideType::OuterStrideAtCompileTime,
+ vector ? size : row_major ? cols : rows>::value;
+ static constexpr bool dynamic_stride = inner_stride == Eigen::Dynamic && outer_stride == Eigen::Dynamic;
+ static constexpr bool requires_row_major = !dynamic_stride && !vector && (row_major ? inner_stride : outer_stride) == 1;
+ static constexpr bool requires_col_major = !dynamic_stride && !vector && (row_major ? outer_stride : inner_stride) == 1;
+
+ // Takes an input array and determines whether we can make it fit into the Eigen type. If
+ // the array is a vector, we attempt to fit it into either an Eigen 1xN or Nx1 vector
+ // (preferring the latter if it will fit in either, i.e. for a fully dynamic matrix type).
+ static EigenConformable<row_major> conformable(const array &a) {
+ const auto dims = a.ndim();
+ if (dims < 1 || dims > 2)
+ return false;
+
+ if (dims == 2) { // Matrix type: require exact match (or dynamic)
+
+ EigenIndex
+ np_rows = a.shape(0),
+ np_cols = a.shape(1),
+ np_rstride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar)),
+ np_cstride = a.strides(1) / static_cast<ssize_t>(sizeof(Scalar));
+ if ((fixed_rows && np_rows != rows) || (fixed_cols && np_cols != cols))
+ return false;
+
+ return {np_rows, np_cols, np_rstride, np_cstride};
+ }
+
+ // Otherwise we're storing an n-vector. Only one of the strides will be used, but whichever
+ // is used, we want the (single) numpy stride value.
+ const EigenIndex n = a.shape(0),
+ stride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar));
+
+ if (vector) { // Eigen type is a compile-time vector
+ if (fixed && size != n)
+ return false; // Vector size mismatch
+ return {rows == 1 ? 1 : n, cols == 1 ? 1 : n, stride};
+ }
+ else if (fixed) {
+ // The type has a fixed size, but is not a vector: abort
+ return false;
+ }
+ else if (fixed_cols) {
+ // Since this isn't a vector, cols must be != 1. We allow this only if it exactly
+ // equals the number of elements (rows is Dynamic, and so 1 row is allowed).
+ if (cols != n) return false;
+ return {1, n, stride};
+ }
+ else {
+ // Otherwise it's either fully dynamic, or column dynamic; both become a column vector
+ if (fixed_rows && rows != n) return false;
+ return {n, 1, stride};
+ }
+ }
+
+ static constexpr bool show_writeable = is_eigen_dense_map<Type>::value && is_eigen_mutable_map<Type>::value;
+ static constexpr bool show_order = is_eigen_dense_map<Type>::value;
+ static constexpr bool show_c_contiguous = show_order && requires_row_major;
+ static constexpr bool show_f_contiguous = !show_c_contiguous && show_order && requires_col_major;
+
+ static constexpr auto descriptor =
+ _("numpy.ndarray[") + npy_format_descriptor<Scalar>::name +
+ _("[") + _<fixed_rows>(_<(size_t) rows>(), _("m")) +
+ _(", ") + _<fixed_cols>(_<(size_t) cols>(), _("n")) +
+ _("]") +
+ // For a reference type (e.g. Ref<MatrixXd>) we have other constraints that might need to be
+ // satisfied: writeable=True (for a mutable reference), and, depending on the map's stride
+ // options, possibly f_contiguous or c_contiguous. We include them in the descriptor output
+ // to provide some hint as to why a TypeError is occurring (otherwise it can be confusing to
+ // see that a function accepts a 'numpy.ndarray[float64[3,2]]' and an error message that you
+// *gave* a numpy.ndarray of the right type and dimensions).
+ _<show_writeable>(", flags.writeable", "") +
+ _<show_c_contiguous>(", flags.c_contiguous", "") +
+ _<show_f_contiguous>(", flags.f_contiguous", "") +
+ _("]");
+};
+
+// Casts an Eigen type to numpy array. If given a base, the numpy array references the src data,
+// otherwise it'll make a copy. writeable lets you turn off the writeable flag for the array.
+template <typename props> handle eigen_array_cast(typename props::Type const &src, handle base = handle(), bool writeable = true) {
+ constexpr ssize_t elem_size = sizeof(typename props::Scalar);
+ array a;
+ if (props::vector)
+ a = array({ src.size() }, { elem_size * src.innerStride() }, src.data(), base);
+ else
+ a = array({ src.rows(), src.cols() }, { elem_size * src.rowStride(), elem_size * src.colStride() },
+ src.data(), base);
+
+ if (!writeable)
+ array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
+
+ return a.release();
+}
+
+// Takes an lvalue ref to some Eigen type and a (python) base object, creating a numpy array that
+// references the Eigen object's data with `base` as the python-registered base class (if omitted,
+// the base will be set to None, and lifetime management is up to the caller). The numpy array is
+// non-writeable if the given type is const.
+template <typename props, typename Type>
+handle eigen_ref_array(Type &src, handle parent = none()) {
+ // none here is to get past array's should-we-copy detection, which currently always
+ // copies when there is no base. Setting the base to None should be harmless.
+ return eigen_array_cast<props>(src, parent, !std::is_const<Type>::value);
+}
+
+// Takes a pointer to some dense, plain Eigen type, builds a capsule around it, then returns a numpy
+// array that references the encapsulated data with a python-side reference to the capsule to tie
+// its destruction to that of any dependent python objects. Const-ness is determined by whether or
+// not the Type of the pointer given is const.
+template <typename props, typename Type, typename = enable_if_t<is_eigen_dense_plain<Type>::value>>
+handle eigen_encapsulate(Type *src) {
+ capsule base(src, [](void *o) { delete static_cast<Type *>(o); });
+ return eigen_ref_array<props>(*src, base);
+}
+
+// Type caster for regular, dense matrix types (e.g. MatrixXd), but not maps/refs/etc. of dense
+// types.
+template<typename Type>
+struct type_caster<Type, enable_if_t<is_eigen_dense_plain<Type>::value>> {
+ using Scalar = typename Type::Scalar;
+ using props = EigenProps<Type>;
+
+ bool load(handle src, bool convert) {
+ // If we're in no-convert mode, only load if given an array of the correct type
+ if (!convert && !isinstance<array_t<Scalar>>(src))
+ return false;
+
+ // Coerce into an array, but don't do type conversion yet; the copy below handles it.
+ auto buf = array::ensure(src);
+
+ if (!buf)
+ return false;
+
+ auto dims = buf.ndim();
+ if (dims < 1 || dims > 2)
+ return false;
+
+ auto fits = props::conformable(buf);
+ if (!fits)
+ return false;
+
+ // Allocate the new type, then build a numpy reference into it
+ value = Type(fits.rows, fits.cols);
+ auto ref = reinterpret_steal<array>(eigen_ref_array<props>(value));
+ if (dims == 1) ref = ref.squeeze();
+ else if (ref.ndim() == 1) buf = buf.squeeze();
+
+ int result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr());
+
+ if (result < 0) { // Copy failed!
+ PyErr_Clear();
+ return false;
+ }
+
+ return true;
+ }
+
+private:
+
+ // Cast implementation
+ template <typename CType>
+ static handle cast_impl(CType *src, return_value_policy policy, handle parent) {
+ switch (policy) {
+ case return_value_policy::take_ownership:
+ case return_value_policy::automatic:
+ return eigen_encapsulate<props>(src);
+ case return_value_policy::move:
+ return eigen_encapsulate<props>(new CType(std::move(*src)));
+ case return_value_policy::copy:
+ return eigen_array_cast<props>(*src);
+ case return_value_policy::reference:
+ case return_value_policy::automatic_reference:
+ return eigen_ref_array<props>(*src);
+ case return_value_policy::reference_internal:
+ return eigen_ref_array<props>(*src, parent);
+ default:
+ throw cast_error("unhandled return_value_policy: should not happen!");
+ };
+ }
+
+public:
+
+ // Normal returned non-reference, non-const value:
+ static handle cast(Type &&src, return_value_policy /* policy */, handle parent) {
+ return cast_impl(&src, return_value_policy::move, parent);
+ }
+ // If you return a non-reference const, we mark the numpy array readonly:
+ static handle cast(const Type &&src, return_value_policy /* policy */, handle parent) {
+ return cast_impl(&src, return_value_policy::move, parent);
+ }
+ // lvalue reference return; default (automatic) becomes copy
+ static handle cast(Type &src, return_value_policy policy, handle parent) {
+ if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference)
+ policy = return_value_policy::copy;
+ return cast_impl(&src, policy, parent);
+ }
+ // const lvalue reference return; default (automatic) becomes copy
+ static handle cast(const Type &src, return_value_policy policy, handle parent) {
+ if (policy == return_value_policy::automatic || policy == return_value_policy::automatic_reference)
+ policy = return_value_policy::copy;
+ return cast(&src, policy, parent);
+ }
+ // non-const pointer return
+ static handle cast(Type *src, return_value_policy policy, handle parent) {
+ return cast_impl(src, policy, parent);
+ }
+ // const pointer return
+ static handle cast(const Type *src, return_value_policy policy, handle parent) {
+ return cast_impl(src, policy, parent);
+ }
+
+ static constexpr auto name = props::descriptor;
+
+ operator Type*() { return &value; }
+ operator Type&() { return value; }
+ operator Type&&() && { return std::move(value); }
+ template <typename T> using cast_op_type = movable_cast_op_type<T>;
+
+private:
+ Type value;
+};
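+
+// Example (an illustrative sketch; requires #include <pybind11/eigen.h> in the binding code):
+// plain dense matrices are converted to and from numpy.ndarray by value, copying the data in
+// both directions:
+//
+//     m.def("scale", [](const Eigen::MatrixXd &x, double s) -> Eigen::MatrixXd { return s * x; });
+//     // Python: scale(numpy.eye(3), 2.0) returns a 3x3 float64 ndarray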
+
+// Base class for casting reference/map/block/etc. objects back to python.
+template <typename MapType> struct eigen_map_caster {
+private:
+ using props = EigenProps<MapType>;
+
+public:
+
+ // Directly referencing a ref/map's data is a bit dangerous (whatever the map/ref points to has
+ // to stay around), but we'll allow it under the assumption that you know what you're doing (and
+ // have an appropriate keep_alive in place). We return a numpy array pointing directly at the
+ // ref's data. (The numpy array ends up read-only if the ref was to a const matrix type.) Note
+ // that this means the referenced object must stay alive for as long as the array is in use,
+ // e.g. through an appropriate keep_alive, or by referencing a statically allocated matrix.
+ static handle cast(const MapType &src, return_value_policy policy, handle parent) {
+ switch (policy) {
+ case return_value_policy::copy:
+ return eigen_array_cast<props>(src);
+ case return_value_policy::reference_internal:
+ return eigen_array_cast<props>(src, parent, is_eigen_mutable_map<MapType>::value);
+ case return_value_policy::reference:
+ case return_value_policy::automatic:
+ case return_value_policy::automatic_reference:
+ return eigen_array_cast<props>(src, none(), is_eigen_mutable_map<MapType>::value);
+ default:
+ // move, take_ownership don't make any sense for a ref/map:
+ pybind11_fail("Invalid return_value_policy for Eigen Map/Ref/Block type");
+ }
+ }
+
+ static constexpr auto name = props::descriptor;
+
+ // Explicitly delete these: we do not support Python -> C++ conversion for these types (i.e. they
+ // can be return types but not bound arguments). We still provide them (explicitly deleted) so
+ // that you end up here with a clear error if you try anyway.
+ bool load(handle, bool) = delete;
+ operator MapType() = delete;
+ template <typename> using cast_op_type = MapType;
+};
+
+// We can return any map-like object (but can only load Refs, specialized next):
+template <typename Type> struct type_caster<Type, enable_if_t<is_eigen_dense_map<Type>::value>>
+ : eigen_map_caster<Type> {};
+
+// Loader for Ref<...> arguments. See the documentation for info on how to make this work without
+// copying (it requires some extra effort in many cases).
+template <typename PlainObjectType, typename StrideType>
+struct type_caster<
+ Eigen::Ref<PlainObjectType, 0, StrideType>,
+ enable_if_t<is_eigen_dense_map<Eigen::Ref<PlainObjectType, 0, StrideType>>::value>
+> : public eigen_map_caster<Eigen::Ref<PlainObjectType, 0, StrideType>> {
+private:
+ using Type = Eigen::Ref<PlainObjectType, 0, StrideType>;
+ using props = EigenProps<Type>;
+ using Scalar = typename props::Scalar;
+ using MapType = Eigen::Map<PlainObjectType, 0, StrideType>;
+ using Array = array_t<Scalar, array::forcecast |
+ ((props::row_major ? props::inner_stride : props::outer_stride) == 1 ? array::c_style :
+ (props::row_major ? props::outer_stride : props::inner_stride) == 1 ? array::f_style : 0)>;
+ static constexpr bool need_writeable = is_eigen_mutable_map<Type>::value;
+ // Delay construction (these have no default constructor)
+ std::unique_ptr<MapType> map;
+ std::unique_ptr<Type> ref;
+ // Our array. When possible, this is just a numpy array pointing to the source data, but
+ // sometimes we can't avoid copying (e.g. input is not a numpy array at all, has an incompatible
+ // layout, or is an array of a type that needs to be converted). Using a numpy temporary
+ // (rather than an Eigen temporary) saves an extra copy when we need both type conversion and
+ // storage order conversion. (Note that we refuse to use this temporary copy when loading an
+ // argument for a Ref<M> with M non-const, i.e. a read-write reference).
+ Array copy_or_ref;
+public:
+ bool load(handle src, bool convert) {
+ // First check whether what we have is already an array of the right type. If not, we can't
+ // avoid a copy (because the copy is also going to do type conversion).
+ bool need_copy = !isinstance<Array>(src);
+
+ EigenConformable<props::row_major> fits;
+ if (!need_copy) {
+ // We don't need a converting copy, but we also need to check whether the strides are
+ // compatible with the Ref's stride requirements
+ auto aref = reinterpret_borrow<Array>(src);
+
+ if (aref && (!need_writeable || aref.writeable())) {
+ fits = props::conformable(aref);
+ if (!fits) return false; // Incompatible dimensions
+ if (!fits.template stride_compatible<props>())
+ need_copy = true;
+ else
+ copy_or_ref = std::move(aref);
+ }
+ else {
+ need_copy = true;
+ }
+ }
+
+ if (need_copy) {
+ // We need to copy: if we need a mutable reference, or if we're not supposed to convert
+ // (either because we're in the no-convert overload pass, or because we're explicitly
+ // instructed not to copy via `py::arg().noconvert()`), we have to fail loading.
+ if (!convert || need_writeable) return false;
+
+ Array copy = Array::ensure(src);
+ if (!copy) return false;
+ fits = props::conformable(copy);
+ if (!fits || !fits.template stride_compatible<props>())
+ return false;
+ copy_or_ref = std::move(copy);
+ loader_life_support::add_patient(copy_or_ref);
+ }
+
+ ref.reset();
+ map.reset(new MapType(data(copy_or_ref), fits.rows, fits.cols, make_stride(fits.stride.outer(), fits.stride.inner())));
+ ref.reset(new Type(*map));
+
+ return true;
+ }
+
+ operator Type*() { return ref.get(); }
+ operator Type&() { return *ref; }
+ template <typename _T> using cast_op_type = pybind11::detail::cast_op_type<_T>;
+
+private:
+ template <typename T = Type, enable_if_t<is_eigen_mutable_map<T>::value, int> = 0>
+ Scalar *data(Array &a) { return a.mutable_data(); }
+
+ template <typename T = Type, enable_if_t<!is_eigen_mutable_map<T>::value, int> = 0>
+ const Scalar *data(Array &a) { return a.data(); }
+
+ // Attempt to figure out a constructor of `Stride` that will work.
+ // If both strides are fixed, use a default constructor:
+ template <typename S> using stride_ctor_default = bool_constant<
+ S::InnerStrideAtCompileTime != Eigen::Dynamic && S::OuterStrideAtCompileTime != Eigen::Dynamic &&
+ std::is_default_constructible<S>::value>;
+ // Otherwise, if there is a two-index constructor, assume it is (outer,inner) like
+ // Eigen::Stride, and use it:
+ template <typename S> using stride_ctor_dual = bool_constant<
+ !stride_ctor_default<S>::value && std::is_constructible<S, EigenIndex, EigenIndex>::value>;
+ // Otherwise, if there is a one-index constructor, and just one of the strides is dynamic, use
+ // it (passing whichever stride is dynamic).
+ template <typename S> using stride_ctor_outer = bool_constant<
+ !any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value &&
+ S::OuterStrideAtCompileTime == Eigen::Dynamic && S::InnerStrideAtCompileTime != Eigen::Dynamic &&
+ std::is_constructible<S, EigenIndex>::value>;
+ template <typename S> using stride_ctor_inner = bool_constant<
+ !any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value &&
+ S::InnerStrideAtCompileTime == Eigen::Dynamic && S::OuterStrideAtCompileTime != Eigen::Dynamic &&
+ std::is_constructible<S, EigenIndex>::value>;
+
+ template <typename S = StrideType, enable_if_t<stride_ctor_default<S>::value, int> = 0>
+ static S make_stride(EigenIndex, EigenIndex) { return S(); }
+ template <typename S = StrideType, enable_if_t<stride_ctor_dual<S>::value, int> = 0>
+ static S make_stride(EigenIndex outer, EigenIndex inner) { return S(outer, inner); }
+ template <typename S = StrideType, enable_if_t<stride_ctor_outer<S>::value, int> = 0>
+ static S make_stride(EigenIndex outer, EigenIndex) { return S(outer); }
+ template <typename S = StrideType, enable_if_t<stride_ctor_inner<S>::value, int> = 0>
+ static S make_stride(EigenIndex, EigenIndex inner) { return S(inner); }
+
+};
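+
+// Example (an illustrative sketch): binding against Eigen::Ref avoids the copy when the numpy
+// array already has a compatible scalar type and stride layout; the EigenDRef alias defined
+// above accepts arbitrary (dynamic) strides:
+//
+//     m.def("trace", [](py::EigenDRef<const Eigen::MatrixXd> x) { return x.trace(); });
+//     m.def("fill",  [](py::EigenDRef<Eigen::MatrixXd> x, double v) { x.setConstant(v); });
+//     // the mutable overload never accepts a converting copy (see need_writeable above)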
+
+// type_caster for special matrix types (e.g. DiagonalMatrix), which are EigenBase, but not
+// EigenDense (i.e. they don't have a data(), at least not with the usual matrix layout).
+// load() is not supported, but we can cast them into the python domain by first copying to a
+// regular Eigen::Matrix, then casting that.
+template <typename Type>
+struct type_caster<Type, enable_if_t<is_eigen_other<Type>::value>> {
+protected:
+ using Matrix = Eigen::Matrix<typename Type::Scalar, Type::RowsAtCompileTime, Type::ColsAtCompileTime>;
+ using props = EigenProps<Matrix>;
+public:
+ static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
+ handle h = eigen_encapsulate<props>(new Matrix(src));
+ return h;
+ }
+ static handle cast(const Type *src, return_value_policy policy, handle parent) { return cast(*src, policy, parent); }
+
+ static constexpr auto name = props::descriptor;
+
+ // Explicitly delete these: we do not support Python -> C++ conversion for these types (i.e. they
+ // can be return types but not bound arguments). We still provide them (explicitly deleted) so
+ // that you end up here with a clear error if you try anyway.
+ bool load(handle, bool) = delete;
+ operator Type() = delete;
+ template <typename> using cast_op_type = Type;
+};
+
+template<typename Type>
+struct type_caster<Type, enable_if_t<is_eigen_sparse<Type>::value>> {
+ using Scalar = typename Type::Scalar;
+ using StorageIndex = remove_reference_t<decltype(*std::declval<Type>().outerIndexPtr())>;
+ using Index = typename Type::Index;
+ static constexpr bool rowMajor = Type::IsRowMajor;
+
+ bool load(handle src, bool) {
+ if (!src)
+ return false;
+
+ auto obj = reinterpret_borrow<object>(src);
+ object sparse_module = module_::import("scipy.sparse");
+ object matrix_type = sparse_module.attr(
+ rowMajor ? "csr_matrix" : "csc_matrix");
+
+ if (!type::handle_of(obj).is(matrix_type)) {
+ try {
+ obj = matrix_type(obj);
+ } catch (const error_already_set &) {
+ return false;
+ }
+ }
+
+ auto values = array_t<Scalar>((object) obj.attr("data"));
+ auto innerIndices = array_t<StorageIndex>((object) obj.attr("indices"));
+ auto outerIndices = array_t<StorageIndex>((object) obj.attr("indptr"));
+ auto shape = pybind11::tuple((pybind11::object) obj.attr("shape"));
+ auto nnz = obj.attr("nnz").cast<Index>();
+
+ if (!values || !innerIndices || !outerIndices)
+ return false;
+
+ value = Eigen::MappedSparseMatrix<Scalar, Type::Flags, StorageIndex>(
+ shape[0].cast<Index>(), shape[1].cast<Index>(), nnz,
+ outerIndices.mutable_data(), innerIndices.mutable_data(), values.mutable_data());
+
+ return true;
+ }
+
+ static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
+ const_cast<Type&>(src).makeCompressed();
+
+ object matrix_type = module_::import("scipy.sparse").attr(
+ rowMajor ? "csr_matrix" : "csc_matrix");
+
+ array data(src.nonZeros(), src.valuePtr());
+ array outerIndices((rowMajor ? src.rows() : src.cols()) + 1, src.outerIndexPtr());
+ array innerIndices(src.nonZeros(), src.innerIndexPtr());
+
+ return matrix_type(
+ std::make_tuple(data, innerIndices, outerIndices),
+ std::make_pair(src.rows(), src.cols())
+ ).release();
+ }
+
+ PYBIND11_TYPE_CASTER(Type, _<(Type::IsRowMajor) != 0>("scipy.sparse.csr_matrix[", "scipy.sparse.csc_matrix[")
+ + npy_format_descriptor<Scalar>::name + _("]"));
+};
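+
+// Example (an illustrative sketch; scipy must be importable at runtime): a column-major
+// Eigen::SparseMatrix maps to scipy.sparse.csc_matrix, a row-major one to csr_matrix:
+//
+//     m.def("sparse_identity", [](int n) {
+//         Eigen::SparseMatrix<double> mat(n, n);
+//         mat.setIdentity();
+//         return mat;  // arrives in Python as scipy.sparse.csc_matrix
+//     });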
+
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
+
+#if defined(__GNUG__) || defined(__clang__)
+# pragma GCC diagnostic pop
+#elif defined(_MSC_VER)
+# pragma warning(pop)
+#endif
--- /dev/null
+/*
+ pybind11/embed.h: Support for embedding the interpreter
+
+ Copyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "pybind11.h"
+#include "eval.h"
+
+#if defined(PYPY_VERSION)
+# error Embedding the interpreter is not supported with PyPy
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+# define PYBIND11_EMBEDDED_MODULE_IMPL(name) \
+ extern "C" PyObject *pybind11_init_impl_##name(); \
+ extern "C" PyObject *pybind11_init_impl_##name() { \
+ return pybind11_init_wrapper_##name(); \
+ }
+#else
+# define PYBIND11_EMBEDDED_MODULE_IMPL(name) \
+ extern "C" void pybind11_init_impl_##name(); \
+ extern "C" void pybind11_init_impl_##name() { \
+ pybind11_init_wrapper_##name(); \
+ }
+#endif
+
+/** \rst
+ Add a new module to the table of builtins for the interpreter. Must be
+ defined in global scope. The first macro parameter is the name of the
+ module (without quotes). The second parameter is the variable which will
+ be used as the interface to add functions and classes to the module.
+
+ .. code-block:: cpp
+
+ PYBIND11_EMBEDDED_MODULE(example, m) {
+ // ... initialize functions and classes here
+ m.def("foo", []() {
+ return "Hello, World!";
+ });
+ }
+ \endrst */
+#define PYBIND11_EMBEDDED_MODULE(name, variable) \
+ static ::pybind11::module_::module_def \
+ PYBIND11_CONCAT(pybind11_module_def_, name); \
+ static void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ &); \
+ static PyObject PYBIND11_CONCAT(*pybind11_init_wrapper_, name)() { \
+ auto m = ::pybind11::module_::create_extension_module( \
+ PYBIND11_TOSTRING(name), nullptr, \
+ &PYBIND11_CONCAT(pybind11_module_def_, name)); \
+ try { \
+ PYBIND11_CONCAT(pybind11_init_, name)(m); \
+ return m.ptr(); \
+ } PYBIND11_CATCH_INIT_EXCEPTIONS \
+ } \
+ PYBIND11_EMBEDDED_MODULE_IMPL(name) \
+ ::pybind11::detail::embedded_module PYBIND11_CONCAT(pybind11_module_, name) \
+ (PYBIND11_TOSTRING(name), \
+ PYBIND11_CONCAT(pybind11_init_impl_, name)); \
+ void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ &variable)
+
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+/// Python 2.7/3.x compatible version of `PyImport_AppendInittab` and error checks.
+struct embedded_module {
+#if PY_MAJOR_VERSION >= 3
+ using init_t = PyObject *(*)();
+#else
+ using init_t = void (*)();
+#endif
+ embedded_module(const char *name, init_t init) {
+ if (Py_IsInitialized())
+ pybind11_fail("Can't add new modules after the interpreter has been initialized");
+
+ auto result = PyImport_AppendInittab(name, init);
+ if (result == -1)
+ pybind11_fail("Insufficient memory to add a new module");
+ }
+};
+
+PYBIND11_NAMESPACE_END(detail)
+
+/** \rst
+ Initialize the Python interpreter. No other pybind11 or CPython API functions can be
+ called before this is done, with the exception of `PYBIND11_EMBEDDED_MODULE`. The
+ optional parameter can be used to skip the registration of signal handlers (see the
+ `Python documentation`_ for details). Calling this function again after the interpreter
+ has already been initialized is a fatal error.
+
+ If initializing the Python interpreter fails, then the program is terminated. (This
+ is controlled by the CPython runtime and is an exception to pybind11's normal behavior
+ of throwing exceptions on errors.)
+
+ .. _Python documentation: https://docs.python.org/3/c-api/init.html#c.Py_InitializeEx
+ \endrst */
+inline void initialize_interpreter(bool init_signal_handlers = true) {
+ if (Py_IsInitialized())
+ pybind11_fail("The interpreter is already running");
+
+ Py_InitializeEx(init_signal_handlers ? 1 : 0);
+
+ // Make .py files in the working directory available by default
+ module_::import("sys").attr("path").cast<list>().append(".");
+}
+
+/** \rst
+ Shut down the Python interpreter. No pybind11 or CPython API functions can be called
+ after this. In addition, pybind11 objects must not outlive the interpreter:
+
+ .. code-block:: cpp
+
+ { // BAD
+ py::initialize_interpreter();
+ auto hello = py::str("Hello, World!");
+ py::finalize_interpreter();
+ } // <-- BOOM, hello's destructor is called after interpreter shutdown
+
+ { // GOOD
+ py::initialize_interpreter();
+ { // scoped
+ auto hello = py::str("Hello, World!");
+ } // <-- OK, hello is cleaned up properly
+ py::finalize_interpreter();
+ }
+
+ { // BETTER
+ py::scoped_interpreter guard{};
+ auto hello = py::str("Hello, World!");
+ }
+
+ .. warning::
+
+ The interpreter can be restarted by calling `initialize_interpreter` again.
+ Modules created using pybind11 can be safely re-initialized. However, Python
+ itself cannot completely unload binary extension modules and there are several
+ caveats with regard to interpreter restarting. All the details can be found
+ in the CPython documentation. In short, not all interpreter memory may be
+ freed, either due to reference cycles or user-created global data.
+
+ \endrst */
+inline void finalize_interpreter() {
+ handle builtins(PyEval_GetBuiltins());
+ const char *id = PYBIND11_INTERNALS_ID;
+
+ // Get the internals pointer (without creating it if it doesn't exist). It's possible for the
+ // internals to be created during Py_Finalize() (e.g. if a py::capsule calls `get_internals()`
+ // during destruction), so we get the pointer-pointer here and check it after Py_Finalize().
+ detail::internals **internals_ptr_ptr = detail::get_internals_pp();
+ // It could also be stashed in builtins, so look there too:
+ if (builtins.contains(id) && isinstance<capsule>(builtins[id]))
+ internals_ptr_ptr = capsule(builtins[id]);
+
+ Py_Finalize();
+
+ if (internals_ptr_ptr) {
+ delete *internals_ptr_ptr;
+ *internals_ptr_ptr = nullptr;
+ }
+}
+
+/** \rst
+ Scope guard version of `initialize_interpreter` and `finalize_interpreter`.
+ This is a move-only guard and only a single instance can exist.
+
+ .. code-block:: cpp
+
+ #include <pybind11/embed.h>
+
+ int main() {
+ py::scoped_interpreter guard{};
+ py::print("Hello, World!");
+ } // <-- interpreter shutdown
+ \endrst */
+class scoped_interpreter {
+public:
+ scoped_interpreter(bool init_signal_handlers = true) {
+ initialize_interpreter(init_signal_handlers);
+ }
+
+ scoped_interpreter(const scoped_interpreter &) = delete;
+ scoped_interpreter(scoped_interpreter &&other) noexcept { other.is_valid = false; }
+ scoped_interpreter &operator=(const scoped_interpreter &) = delete;
+ scoped_interpreter &operator=(scoped_interpreter &&) = delete;
+
+ ~scoped_interpreter() {
+ if (is_valid)
+ finalize_interpreter();
+ }
+
+private:
+ bool is_valid = true;
+};
+
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+/*
+ pybind11/exec.h: Support for evaluating Python expressions and statements
+ from strings and files
+
+ Copyright (c) 2016 Klemens Morgenstern <klemens.morgenstern@ed-chemnitz.de> and
+ Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "pybind11.h"
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+inline void ensure_builtins_in_globals(object &global) {
+ #if PY_VERSION_HEX < 0x03080000
+ // Running exec and eval on Python 2 and 3 adds the `builtins` module under the
+ // `__builtins__` key to globals if it is not yet present.
+ // Python 3.8 made PyRun_String behave similarly. Let's also do that for
+ // older versions, for consistency.
+ if (!global.contains("__builtins__"))
+ global["__builtins__"] = module_::import(PYBIND11_BUILTINS_MODULE);
+ #else
+ (void) global;
+ #endif
+}
+
+PYBIND11_NAMESPACE_END(detail)
+
+enum eval_mode {
+ /// Evaluate a string containing an isolated expression
+ eval_expr,
+
+ /// Evaluate a string containing a single statement. Returns \c none
+ eval_single_statement,
+
+ /// Evaluate a string containing a sequence of statements. Returns \c none
+ eval_statements
+};
+
+template <eval_mode mode = eval_expr>
+object eval(str expr, object global = globals(), object local = object()) {
+ if (!local)
+ local = global;
+
+ detail::ensure_builtins_in_globals(global);
+
+ /* PyRun_String does not accept a PyObject / encoding specifier,
+ this seems to be the only alternative */
+ std::string buffer = "# -*- coding: utf-8 -*-\n" + (std::string) expr;
+
+ int start;
+ switch (mode) {
+ case eval_expr: start = Py_eval_input; break;
+ case eval_single_statement: start = Py_single_input; break;
+ case eval_statements: start = Py_file_input; break;
+ default: pybind11_fail("invalid evaluation mode");
+ }
+
+ PyObject *result = PyRun_String(buffer.c_str(), start, global.ptr(), local.ptr());
+ if (!result)
+ throw error_already_set();
+ return reinterpret_steal<object>(result);
+}
+
+template <eval_mode mode = eval_expr, size_t N>
+object eval(const char (&s)[N], object global = globals(), object local = object()) {
+ /* Support raw string literals by removing common leading whitespace */
+ auto expr = (s[0] == '\n') ? str(module_::import("textwrap").attr("dedent")(s))
+ : str(s);
+ return eval<mode>(expr, global, local);
+}
+
+inline void exec(str expr, object global = globals(), object local = object()) {
+ eval<eval_statements>(expr, global, local);
+}
+
+template <size_t N>
+void exec(const char (&s)[N], object global = globals(), object local = object()) {
+ eval<eval_statements>(s, global, local);
+}
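+
+// Example (an illustrative sketch): `exec` runs statements, `eval` returns the value of a single
+// expression; both default to the `__main__` globals:
+//
+//     py::exec("x = 21 * 2");
+//     int x = py::eval("x").cast<int>();  // x == 42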
+
+#if defined(PYPY_VERSION) && PY_VERSION_HEX >= 0x03000000
+template <eval_mode mode = eval_statements>
+object eval_file(str, object, object) {
+ pybind11_fail("eval_file not supported in PyPy3. Use eval");
+}
+template <eval_mode mode = eval_statements>
+object eval_file(str, object) {
+ pybind11_fail("eval_file not supported in PyPy3. Use eval");
+}
+template <eval_mode mode = eval_statements>
+object eval_file(str) {
+ pybind11_fail("eval_file not supported in PyPy3. Use eval");
+}
+#else
+template <eval_mode mode = eval_statements>
+object eval_file(str fname, object global = globals(), object local = object()) {
+ if (!local)
+ local = global;
+
+ detail::ensure_builtins_in_globals(global);
+
+ int start;
+ switch (mode) {
+ case eval_expr: start = Py_eval_input; break;
+ case eval_single_statement: start = Py_single_input; break;
+ case eval_statements: start = Py_file_input; break;
+ default: pybind11_fail("invalid evaluation mode");
+ }
+
+ int closeFile = 1;
+ std::string fname_str = (std::string) fname;
+#if PY_VERSION_HEX >= 0x03040000
+ FILE *f = _Py_fopen_obj(fname.ptr(), "r");
+#elif PY_VERSION_HEX >= 0x03000000
+ FILE *f = _Py_fopen(fname.ptr(), "r");
+#else
+ /* No unicode support in open() :( */
+ auto fobj = reinterpret_steal<object>(PyFile_FromString(
+ const_cast<char *>(fname_str.c_str()),
+ const_cast<char*>("r")));
+ FILE *f = nullptr;
+ if (fobj)
+ f = PyFile_AsFile(fobj.ptr());
+ closeFile = 0;
+#endif
+ if (!f) {
+ PyErr_Clear();
+ pybind11_fail("File \"" + fname_str + "\" could not be opened!");
+ }
+
+#if PY_VERSION_HEX < 0x03000000 && defined(PYPY_VERSION)
+ PyObject *result = PyRun_File(f, fname_str.c_str(), start, global.ptr(),
+ local.ptr());
+ (void) closeFile;
+#else
+ PyObject *result = PyRun_FileEx(f, fname_str.c_str(), start, global.ptr(),
+ local.ptr(), closeFile);
+#endif
+
+ if (!result)
+ throw error_already_set();
+ return reinterpret_steal<object>(result);
+}
+#endif
+
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+/*
+ pybind11/extensions.h: Extensions to the C++11 python binding
+ generator library for dune-fempy
+
+ Copyright (c) 2016 Andreas Dedner <a.s.dedner@warwick.ac.uk>
+ Copyright (c) 2016 Martin Nolte <nolte@mathematik.uni-freiburg.de>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include <stdexcept>
+#include <type_traits>
+#include <utility>
+
+#include "pybind11.h"
+#include "numpy.h"
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+
+template <class T>
+inline bool already_registered() {
+ return static_cast<bool>(detail::get_type_info(typeid(T)));
+}
+
+
+template <class F>
+inline void handle_buffer_format(const pybind11::buffer_info &info, F &&f) {
+ if(info.format.size() != 1)
+ throw std::runtime_error("Buffer format '" + info.format + "' not supported.");
+ switch(info.format[0]) {
+ case 'h':
+ return f(format_descriptor<short>());
+ case 'H':
+ return f(format_descriptor<unsigned short>());
+ case 'i':
+ return f(format_descriptor<int>());
+ case 'I':
+ return f(format_descriptor<unsigned int>());
+ case 'l':
+ return f(format_descriptor<long>());
+ case 'L':
+ return f(format_descriptor<unsigned long>());
+ case 'q':
+ return f(format_descriptor<long long>());
+ case 'Q':
+ return f(format_descriptor<unsigned long long>());
+ case 'f':
+ return f(format_descriptor<float>());
+ case 'd':
+ return f(format_descriptor<double>());
+ default:
+ throw std::runtime_error("Buffer format '" + info.format + "' not supported.");
+ }
+}
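+
+// Example (an illustrative sketch; `process` is a hypothetical visitor): the callback is invoked
+// with the matching format_descriptor<T>, so a templated operator() recovers the element type:
+//
+//     struct process {
+//         py::buffer_info &info;
+//         template <class T>
+//         void operator()(py::format_descriptor<T>) const {
+//             auto *data = static_cast<T *>(info.ptr);
+//             // ... work on the typed pointer ...
+//         }
+//     };
+//     py::handle_buffer_format(info, process{info});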
+
+template <class T>
+inline void implicitly_convert_facades() {
+ auto implicit_caster = [](PyObject *obj, PyTypeObject *type) -> PyObject * {
+ return getattr(obj, "__impl__", nullptr).release().ptr();
+ };
+
+ if(auto tinfo = detail::get_type_info(typeid(T)))
+ tinfo->implicit_conversions.push_back(implicit_caster);
+ else
+ pybind11_fail("impplicitly_convert_facades: Unable to find type " + type_id<T>());
+}
+
+
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+/*
+ pybind11/functional.h: std::function<> support
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "pybind11.h"
+#include <functional>
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+template <typename Return, typename... Args>
+struct type_caster<std::function<Return(Args...)>> {
+ using type = std::function<Return(Args...)>;
+ using retval_type = conditional_t<std::is_same<Return, void>::value, void_type, Return>;
+ using function_type = Return (*) (Args...);
+
+public:
+ bool load(handle src, bool convert) {
+ if (src.is_none()) {
+ // Defer accepting None to other overloads (if we aren't in convert mode):
+ if (!convert) return false;
+ return true;
+ }
+
+ if (!isinstance<function>(src))
+ return false;
+
+ auto func = reinterpret_borrow<function>(src);
+
+ /*
+ When passing a C++ function as an argument to another C++
+ function via Python, every function call would normally involve
+ a full C++ -> Python -> C++ roundtrip, which can be prohibitive.
+ Here, we try to at least detect the case where the function is
+ stateless (i.e. function pointer or lambda function without
+ captured variables), in which case the roundtrip can be avoided.
+ */
+ if (auto cfunc = func.cpp_function()) {
+ auto c = reinterpret_borrow<capsule>(PyCFunction_GET_SELF(cfunc.ptr()));
+ auto rec = (function_record *) c;
+
+ if (rec && rec->is_stateless &&
+ same_type(typeid(function_type), *reinterpret_cast<const std::type_info *>(rec->data[1]))) {
+ struct capture { function_type f; };
+ value = ((capture *) &rec->data)->f;
+ return true;
+ }
+ }
+
+ // ensure GIL is held during functor destruction
+ struct func_handle {
+ function f;
+ func_handle(function&& f_) : f(std::move(f_)) {}
+ func_handle(const func_handle& f_) {
+ gil_scoped_acquire acq;
+ f = f_.f;
+ }
+ ~func_handle() {
+ gil_scoped_acquire acq;
+ function kill_f(std::move(f));
+ }
+ };
+
+ // to emulate 'move initialization capture' in C++11
+ struct func_wrapper {
+ func_handle hfunc;
+ func_wrapper(func_handle&& hf): hfunc(std::move(hf)) {}
+ Return operator()(Args... args) const {
+ gil_scoped_acquire acq;
+ object retval(hfunc.f(std::forward<Args>(args)...));
+ /* Visual studio 2015 parser issue: need parentheses around this expression */
+ return (retval.template cast<Return>());
+ }
+ };
+
+ value = func_wrapper(func_handle(std::move(func)));
+ return true;
+ }
+
+ template <typename Func>
+ static handle cast(Func &&f_, return_value_policy policy, handle /* parent */) {
+ if (!f_)
+ return none().inc_ref();
+
+ auto result = f_.template target<function_type>();
+ if (result)
+ return cpp_function(*result, policy).release();
+ else
+ return cpp_function(std::forward<Func>(f_), policy).release();
+ }
+
+ PYBIND11_TYPE_CASTER(type, _("Callable[[") + concat(make_caster<Args>::name...) + _("], ")
+ + make_caster<retval_type>::name + _("]"));
+};
+
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+/*
+ pybind11/iostream.h -- Tools to assist with redirecting cout and cerr to Python
+
+ Copyright (c) 2017 Henry F. Schreiner
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "pybind11.h"
+
+#include <streambuf>
+#include <ostream>
+#include <string>
+#include <memory>
+#include <iostream>
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+// Buffer that writes to Python instead of C++
+class pythonbuf : public std::streambuf {
+private:
+ using traits_type = std::streambuf::traits_type;
+
+ const size_t buf_size;
+ std::unique_ptr<char[]> d_buffer;
+ object pywrite;
+ object pyflush;
+
+ int overflow(int c) override {
+ if (!traits_type::eq_int_type(c, traits_type::eof())) {
+ *pptr() = traits_type::to_char_type(c);
+ pbump(1);
+ }
+ return sync() == 0 ? traits_type::not_eof(c) : traits_type::eof();
+ }
+
+ // This function must be non-virtual to be called in a destructor. If the
+ // rare MSVC test failure shows up with this version, then this should be
+ // simplified to a fully qualified call.
+ int _sync() {
+ if (pbase() != pptr()) {
+ // This subtraction cannot be negative, so dropping the sign
+ str line(pbase(), static_cast<size_t>(pptr() - pbase()));
+
+ {
+ gil_scoped_acquire tmp;
+ pywrite(line);
+ pyflush();
+ }
+
+ setp(pbase(), epptr());
+ }
+ return 0;
+ }
+
+ int sync() override {
+ return _sync();
+ }
+
+public:
+
+ pythonbuf(object pyostream, size_t buffer_size = 1024)
+ : buf_size(buffer_size),
+ d_buffer(new char[buf_size]),
+ pywrite(pyostream.attr("write")),
+ pyflush(pyostream.attr("flush")) {
+ setp(d_buffer.get(), d_buffer.get() + buf_size - 1);
+ }
+
+ pythonbuf(pythonbuf&&) = default;
+
+ /// Sync before destroy
+ ~pythonbuf() override {
+ _sync();
+ }
+};
+
+PYBIND11_NAMESPACE_END(detail)
+
+
+/** \rst
+ This is a move-only guard that redirects output.
+
+ .. code-block:: cpp
+
+ #include <pybind11/iostream.h>
+
+ ...
+
+ {
+ py::scoped_ostream_redirect output;
+ std::cout << "Hello, World!"; // Python stdout
+ } // <-- return std::cout to normal
+
+ You can explicitly pass the C++ stream and the Python object,
+ for example to guard stderr instead.
+
+ .. code-block:: cpp
+
+ {
+ py::scoped_ostream_redirect output{std::cerr, py::module_::import("sys").attr("stderr")};
+ std::cerr << "Hello, World!";
+ }
+ \endrst */
+class scoped_ostream_redirect {
+protected:
+ std::streambuf *old;
+ std::ostream &costream;
+ detail::pythonbuf buffer;
+
+public:
+ scoped_ostream_redirect(
+ std::ostream &costream = std::cout,
+ object pyostream = module_::import("sys").attr("stdout"))
+ : costream(costream), buffer(pyostream) {
+ old = costream.rdbuf(&buffer);
+ }
+
+ ~scoped_ostream_redirect() {
+ costream.rdbuf(old);
+ }
+
+ scoped_ostream_redirect(const scoped_ostream_redirect &) = delete;
+ scoped_ostream_redirect(scoped_ostream_redirect &&other) = default;
+ scoped_ostream_redirect &operator=(const scoped_ostream_redirect &) = delete;
+ scoped_ostream_redirect &operator=(scoped_ostream_redirect &&) = delete;
+};
+
+
+/** \rst
+ Like `scoped_ostream_redirect`, but redirects cerr by default. This class
+ is provided primarily to make ``py::call_guard`` easier to use.
+
+ .. code-block:: cpp
+
+ m.def("noisy_func", &noisy_func,
+ py::call_guard<scoped_ostream_redirect,
+ scoped_estream_redirect>());
+
+\endrst */
+class scoped_estream_redirect : public scoped_ostream_redirect {
+public:
+ scoped_estream_redirect(
+ std::ostream &costream = std::cerr,
+ object pyostream = module_::import("sys").attr("stderr"))
+ : scoped_ostream_redirect(costream,pyostream) {}
+};
+
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+// Class to redirect output as a context manager. C++ backend.
+class OstreamRedirect {
+ bool do_stdout_;
+ bool do_stderr_;
+ std::unique_ptr<scoped_ostream_redirect> redirect_stdout;
+ std::unique_ptr<scoped_estream_redirect> redirect_stderr;
+
+public:
+ OstreamRedirect(bool do_stdout = true, bool do_stderr = true)
+ : do_stdout_(do_stdout), do_stderr_(do_stderr) {}
+
+ void enter() {
+ if (do_stdout_)
+ redirect_stdout.reset(new scoped_ostream_redirect());
+ if (do_stderr_)
+ redirect_stderr.reset(new scoped_estream_redirect());
+ }
+
+ void exit() {
+ redirect_stdout.reset();
+ redirect_stderr.reset();
+ }
+};
+
+PYBIND11_NAMESPACE_END(detail)
+
+/** \rst
+ This is a helper function to add a C++ redirect context manager to Python
+ instead of using a C++ guard. To use it, add the following to your binding code:
+
+ .. code-block:: cpp
+
+ #include <pybind11/iostream.h>
+
+ ...
+
+ py::add_ostream_redirect(m, "ostream_redirect");
+
+ You now have a Python context manager that redirects your output:
+
+ .. code-block:: python
+
+ with m.ostream_redirect():
+ m.print_to_cout_function()
+
+ This manager can optionally be told which streams to operate on:
+
+ .. code-block:: python
+
+ with m.ostream_redirect(stdout=True, stderr=True):
+ m.noisy_function_with_error_printing()
+
+ \endrst */
+inline class_<detail::OstreamRedirect> add_ostream_redirect(module_ m, std::string name = "ostream_redirect") {
+ return class_<detail::OstreamRedirect>(m, name.c_str(), module_local())
+ .def(init<bool,bool>(), arg("stdout")=true, arg("stderr")=true)
+ .def("__enter__", &detail::OstreamRedirect::enter)
+ .def("__exit__", [](detail::OstreamRedirect &self_, args) { self_.exit(); });
+}
+
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+/*
+ pybind11/numpy.h: Basic NumPy support, vectorize() wrapper
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "pybind11.h"
+#include "complex.h"
+#include <numeric>
+#include <algorithm>
+#include <array>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <sstream>
+#include <string>
+#include <functional>
+#include <type_traits>
+#include <utility>
+#include <vector>
+#include <typeindex>
+
+#if defined(_MSC_VER)
+# pragma warning(push)
+# pragma warning(disable: 4127) // warning C4127: Conditional expression is constant
+#endif
+
+/* This will be true on all flat address space platforms and allows us to reduce the
+ whole npy_intp / ssize_t / Py_intptr_t business down to just ssize_t for all size
+ and dimension types (e.g. shape, strides, indexing), instead of inflicting this
+ upon the library user. */
+static_assert(sizeof(::pybind11::ssize_t) == sizeof(Py_intptr_t), "ssize_t != Py_intptr_t");
+static_assert(std::is_signed<Py_intptr_t>::value, "Py_intptr_t must be signed");
+// We now can reinterpret_cast between py::ssize_t and Py_intptr_t (MSVC + PyPy cares)
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+
+class array; // Forward declaration
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+template <> struct handle_type_name<array> { static constexpr auto name = _("numpy.ndarray"); };
+
+template <typename type, typename SFINAE = void> struct npy_format_descriptor;
+
+struct PyArrayDescr_Proxy {
+ PyObject_HEAD
+ PyObject *typeobj;
+ char kind;
+ char type;
+ char byteorder;
+ char flags;
+ int type_num;
+ int elsize;
+ int alignment;
+ char *subarray;
+ PyObject *fields;
+ PyObject *names;
+};
+
+struct PyArray_Proxy {
+ PyObject_HEAD
+ char *data;
+ int nd;
+ ssize_t *dimensions;
+ ssize_t *strides;
+ PyObject *base;
+ PyObject *descr;
+ int flags;
+};
+
+struct PyVoidScalarObject_Proxy {
+ PyObject_VAR_HEAD
+ char *obval;
+ PyArrayDescr_Proxy *descr;
+ int flags;
+ PyObject *base;
+};
+
+struct numpy_type_info {
+ PyObject* dtype_ptr;
+ std::string format_str;
+};
+
+struct numpy_internals {
+ std::unordered_map<std::type_index, numpy_type_info> registered_dtypes;
+
+ numpy_type_info *get_type_info(const std::type_info& tinfo, bool throw_if_missing = true) {
+ auto it = registered_dtypes.find(std::type_index(tinfo));
+ if (it != registered_dtypes.end())
+ return &(it->second);
+ if (throw_if_missing)
+ pybind11_fail(std::string("NumPy type info missing for ") + tinfo.name());
+ return nullptr;
+ }
+
+ template<typename T> numpy_type_info *get_type_info(bool throw_if_missing = true) {
+ return get_type_info(typeid(typename std::remove_cv<T>::type), throw_if_missing);
+ }
+};
+
+inline PYBIND11_NOINLINE void load_numpy_internals(numpy_internals* &ptr) {
+ ptr = &get_or_create_shared_data<numpy_internals>("_numpy_internals");
+}
+
+inline numpy_internals& get_numpy_internals() {
+ static numpy_internals* ptr = nullptr;
+ if (!ptr)
+ load_numpy_internals(ptr);
+ return *ptr;
+}
+
+template <typename T> struct same_size {
+ template <typename U> using as = bool_constant<sizeof(T) == sizeof(U)>;
+};
+
+template <typename Concrete> constexpr int platform_lookup() { return -1; }
+
+// Lookup a type according to its size, and return a value corresponding to the NumPy typenum.
+template <typename Concrete, typename T, typename... Ts, typename... Ints>
+constexpr int platform_lookup(int I, Ints... Is) {
+ return sizeof(Concrete) == sizeof(T) ? I : platform_lookup<Concrete, Ts...>(Is...);
+}
+
+struct npy_api {
+ enum constants {
+ NPY_ARRAY_C_CONTIGUOUS_ = 0x0001,
+ NPY_ARRAY_F_CONTIGUOUS_ = 0x0002,
+ NPY_ARRAY_OWNDATA_ = 0x0004,
+ NPY_ARRAY_FORCECAST_ = 0x0010,
+ NPY_ARRAY_ENSUREARRAY_ = 0x0040,
+ NPY_ARRAY_ALIGNED_ = 0x0100,
+ NPY_ARRAY_WRITEABLE_ = 0x0400,
+ NPY_BOOL_ = 0,
+ NPY_BYTE_, NPY_UBYTE_,
+ NPY_SHORT_, NPY_USHORT_,
+ NPY_INT_, NPY_UINT_,
+ NPY_LONG_, NPY_ULONG_,
+ NPY_LONGLONG_, NPY_ULONGLONG_,
+ NPY_FLOAT_, NPY_DOUBLE_, NPY_LONGDOUBLE_,
+ NPY_CFLOAT_, NPY_CDOUBLE_, NPY_CLONGDOUBLE_,
+ NPY_OBJECT_ = 17,
+ NPY_STRING_, NPY_UNICODE_, NPY_VOID_,
+ // Platform-dependent normalization
+ NPY_INT8_ = NPY_BYTE_,
+ NPY_UINT8_ = NPY_UBYTE_,
+ NPY_INT16_ = NPY_SHORT_,
+ NPY_UINT16_ = NPY_USHORT_,
+ // `npy_common.h` defines the integer aliases. In order, it checks:
+ // NPY_BITSOF_LONG, NPY_BITSOF_LONGLONG, NPY_BITSOF_INT, NPY_BITSOF_SHORT, NPY_BITSOF_CHAR
+ // and assigns the alias to the first matching size, so we should check in this order.
+ NPY_INT32_ = platform_lookup<std::int32_t, long, int, short>(
+ NPY_LONG_, NPY_INT_, NPY_SHORT_),
+ NPY_UINT32_ = platform_lookup<std::uint32_t, unsigned long, unsigned int, unsigned short>(
+ NPY_ULONG_, NPY_UINT_, NPY_USHORT_),
+ NPY_INT64_ = platform_lookup<std::int64_t, long, long long, int>(
+ NPY_LONG_, NPY_LONGLONG_, NPY_INT_),
+ NPY_UINT64_ = platform_lookup<std::uint64_t, unsigned long, unsigned long long, unsigned int>(
+ NPY_ULONG_, NPY_ULONGLONG_, NPY_UINT_),
+ };
+
+ typedef struct {
+ Py_intptr_t *ptr;
+ int len;
+ } PyArray_Dims;
+
+ static npy_api& get() {
+ static npy_api api = lookup();
+ return api;
+ }
+
+ bool PyArray_Check_(PyObject *obj) const {
+ return (bool) PyObject_TypeCheck(obj, PyArray_Type_);
+ }
+ bool PyArrayDescr_Check_(PyObject *obj) const {
+ return (bool) PyObject_TypeCheck(obj, PyArrayDescr_Type_);
+ }
+
+ unsigned int (*PyArray_GetNDArrayCFeatureVersion_)();
+ PyObject *(*PyArray_DescrFromType_)(int);
+ PyObject *(*PyArray_NewFromDescr_)
+ (PyTypeObject *, PyObject *, int, Py_intptr_t const *,
+ Py_intptr_t const *, void *, int, PyObject *);
+ // Unused. Not removed because that affects ABI of the class.
+ PyObject *(*PyArray_DescrNewFromType_)(int);
+ int (*PyArray_CopyInto_)(PyObject *, PyObject *);
+ PyObject *(*PyArray_NewCopy_)(PyObject *, int);
+ PyTypeObject *PyArray_Type_;
+ PyTypeObject *PyVoidArrType_Type_;
+ PyTypeObject *PyArrayDescr_Type_;
+ PyObject *(*PyArray_DescrFromScalar_)(PyObject *);
+ PyObject *(*PyArray_FromAny_) (PyObject *, PyObject *, int, int, int, PyObject *);
+ int (*PyArray_DescrConverter_) (PyObject *, PyObject **);
+ bool (*PyArray_EquivTypes_) (PyObject *, PyObject *);
+ int (*PyArray_GetArrayParamsFromObject_)(PyObject *, PyObject *, unsigned char, PyObject **, int *,
+ Py_intptr_t *, PyObject **, PyObject *);
+ PyObject *(*PyArray_Squeeze_)(PyObject *);
+ // Unused. Not removed because that affects ABI of the class.
+ int (*PyArray_SetBaseObject_)(PyObject *, PyObject *);
+ PyObject* (*PyArray_Resize_)(PyObject*, PyArray_Dims*, int, int);
+private:
+ enum functions {
+ API_PyArray_GetNDArrayCFeatureVersion = 211,
+ API_PyArray_Type = 2,
+ API_PyArrayDescr_Type = 3,
+ API_PyVoidArrType_Type = 39,
+ API_PyArray_DescrFromType = 45,
+ API_PyArray_DescrFromScalar = 57,
+ API_PyArray_FromAny = 69,
+ API_PyArray_Resize = 80,
+ API_PyArray_CopyInto = 82,
+ API_PyArray_NewCopy = 85,
+ API_PyArray_NewFromDescr = 94,
+ API_PyArray_DescrNewFromType = 96,
+ API_PyArray_DescrConverter = 174,
+ API_PyArray_EquivTypes = 182,
+ API_PyArray_GetArrayParamsFromObject = 278,
+ API_PyArray_Squeeze = 136,
+ API_PyArray_SetBaseObject = 282
+ };
+
+ static npy_api lookup() {
+ module_ m = module_::import("numpy.core.multiarray");
+ auto c = m.attr("_ARRAY_API");
+#if PY_MAJOR_VERSION >= 3
+ void **api_ptr = (void **) PyCapsule_GetPointer(c.ptr(), NULL);
+#else
+ void **api_ptr = (void **) PyCObject_AsVoidPtr(c.ptr());
+#endif
+ npy_api api;
+#define DECL_NPY_API(Func) api.Func##_ = (decltype(api.Func##_)) api_ptr[API_##Func];
+ DECL_NPY_API(PyArray_GetNDArrayCFeatureVersion);
+ if (api.PyArray_GetNDArrayCFeatureVersion_() < 0x7)
+ pybind11_fail("pybind11 numpy support requires numpy >= 1.7.0");
+ DECL_NPY_API(PyArray_Type);
+ DECL_NPY_API(PyVoidArrType_Type);
+ DECL_NPY_API(PyArrayDescr_Type);
+ DECL_NPY_API(PyArray_DescrFromType);
+ DECL_NPY_API(PyArray_DescrFromScalar);
+ DECL_NPY_API(PyArray_FromAny);
+ DECL_NPY_API(PyArray_Resize);
+ DECL_NPY_API(PyArray_CopyInto);
+ DECL_NPY_API(PyArray_NewCopy);
+ DECL_NPY_API(PyArray_NewFromDescr);
+ DECL_NPY_API(PyArray_DescrNewFromType);
+ DECL_NPY_API(PyArray_DescrConverter);
+ DECL_NPY_API(PyArray_EquivTypes);
+ DECL_NPY_API(PyArray_GetArrayParamsFromObject);
+ DECL_NPY_API(PyArray_Squeeze);
+ DECL_NPY_API(PyArray_SetBaseObject);
+#undef DECL_NPY_API
+ return api;
+ }
+};
+
+inline PyArray_Proxy* array_proxy(void* ptr) {
+ return reinterpret_cast<PyArray_Proxy*>(ptr);
+}
+
+inline const PyArray_Proxy* array_proxy(const void* ptr) {
+ return reinterpret_cast<const PyArray_Proxy*>(ptr);
+}
+
+inline PyArrayDescr_Proxy* array_descriptor_proxy(PyObject* ptr) {
+ return reinterpret_cast<PyArrayDescr_Proxy*>(ptr);
+}
+
+inline const PyArrayDescr_Proxy* array_descriptor_proxy(const PyObject* ptr) {
+ return reinterpret_cast<const PyArrayDescr_Proxy*>(ptr);
+}
+
+inline bool check_flags(const void* ptr, int flag) {
+ return (flag == (array_proxy(ptr)->flags & flag));
+}
+
+template <typename T> struct is_std_array : std::false_type { };
+template <typename T, size_t N> struct is_std_array<std::array<T, N>> : std::true_type { };
+template <typename T> struct is_complex : std::false_type { };
+template <typename T> struct is_complex<std::complex<T>> : std::true_type { };
+
+template <typename T> struct array_info_scalar {
+ using type = T;
+ static constexpr bool is_array = false;
+ static constexpr bool is_empty = false;
+ static constexpr auto extents = _("");
+ static void append_extents(list& /* shape */) { }
+};
+// Computes underlying type and a comma-separated list of extents for array
+// types (any mix of std::array and built-in arrays). An array of char is
+// treated as scalar because it gets special handling.
+template <typename T> struct array_info : array_info_scalar<T> { };
+template <typename T, size_t N> struct array_info<std::array<T, N>> {
+ using type = typename array_info<T>::type;
+ static constexpr bool is_array = true;
+ static constexpr bool is_empty = (N == 0) || array_info<T>::is_empty;
+ static constexpr size_t extent = N;
+
+ // appends the extents to shape
+ static void append_extents(list& shape) {
+ shape.append(N);
+ array_info<T>::append_extents(shape);
+ }
+
+ static constexpr auto extents = _<array_info<T>::is_array>(
+ concat(_<N>(), array_info<T>::extents), _<N>()
+ );
+};
+// For numpy we have special handling for arrays of characters, so we don't include
+// the size in the array extents.
+template <size_t N> struct array_info<char[N]> : array_info_scalar<char[N]> { };
+template <size_t N> struct array_info<std::array<char, N>> : array_info_scalar<std::array<char, N>> { };
+template <typename T, size_t N> struct array_info<T[N]> : array_info<std::array<T, N>> { };
+template <typename T> using remove_all_extents_t = typename array_info<T>::type;
+
+template <typename T> using is_pod_struct = all_of<
+ std::is_standard_layout<T>, // since we're accessing directly in memory we need a standard layout type
+#if !defined(__GNUG__) || defined(_LIBCPP_VERSION) || defined(_GLIBCXX_USE_CXX11_ABI)
+ // _GLIBCXX_USE_CXX11_ABI indicates that we're using libstdc++ from GCC 5 or newer, independent
+ // of the actual compiler (Clang can also use libstdc++, but it always defines __GNUC__ == 4).
+ std::is_trivially_copyable<T>,
+#else
+ // GCC 4 doesn't implement is_trivially_copyable, so approximate it
+ std::is_trivially_destructible<T>,
+ satisfies_any_of<T, std::has_trivial_copy_constructor, std::has_trivial_copy_assign>,
+#endif
+ satisfies_none_of<T, std::is_reference, std::is_array, is_std_array, std::is_arithmetic, is_complex, std::is_enum>
+>;
+
+// Replacement for std::is_pod (deprecated in C++20)
+template <typename T> using is_pod = all_of<
+ std::is_standard_layout<T>,
+ std::is_trivial<T>
+>;
+
+template <ssize_t Dim = 0, typename Strides> ssize_t byte_offset_unsafe(const Strides &) { return 0; }
+template <ssize_t Dim = 0, typename Strides, typename... Ix>
+ssize_t byte_offset_unsafe(const Strides &strides, ssize_t i, Ix... index) {
+ return i * strides[Dim] + byte_offset_unsafe<Dim + 1>(strides, index...);
+}
+
+/**
+ * Proxy class providing unsafe, unchecked const access to array data. This is constructed through
+ * the `unchecked<T, N>()` method of `array` or the `unchecked<N>()` method of `array_t<T>`. `Dims`
+ * will be -1 for dimensions determined at runtime.
+ */
+template <typename T, ssize_t Dims>
+class unchecked_reference {
+protected:
+ static constexpr bool Dynamic = Dims < 0;
+ const unsigned char *data_;
+ // Storing the shape & strides in local variables (i.e. these arrays) allows the compiler to
+ // make large performance gains on big, nested loops, but requires compile-time dimensions
+ conditional_t<Dynamic, const ssize_t *, std::array<ssize_t, (size_t) Dims>>
+ shape_, strides_;
+ const ssize_t dims_;
+
+ friend class pybind11::array;
+ // Constructor for compile-time dimensions:
+ template <bool Dyn = Dynamic>
+ unchecked_reference(const void *data, const ssize_t *shape, const ssize_t *strides, enable_if_t<!Dyn, ssize_t>)
+ : data_{reinterpret_cast<const unsigned char *>(data)}, dims_{Dims} {
+ for (size_t i = 0; i < (size_t) dims_; i++) {
+ shape_[i] = shape[i];
+ strides_[i] = strides[i];
+ }
+ }
+ // Constructor for runtime dimensions:
+ template <bool Dyn = Dynamic>
+ unchecked_reference(const void *data, const ssize_t *shape, const ssize_t *strides, enable_if_t<Dyn, ssize_t> dims)
+ : data_{reinterpret_cast<const unsigned char *>(data)}, shape_{shape}, strides_{strides}, dims_{dims} {}
+
+public:
+ /**
+ * Unchecked const reference access to data at the given indices. For a compile-time known
+ * number of dimensions, this requires the correct number of arguments; for run-time
+ * dimensionality, this is not checked (and so is up to the caller to use safely).
+ */
+ template <typename... Ix> const T &operator()(Ix... index) const {
+ static_assert(ssize_t{sizeof...(Ix)} == Dims || Dynamic,
+ "Invalid number of indices for unchecked array reference");
+ return *reinterpret_cast<const T *>(data_ + byte_offset_unsafe(strides_, ssize_t(index)...));
+ }
+ /**
+ * Unchecked const reference access to data; this operator only participates if the reference
+ * is to a 1-dimensional array. When present, this is exactly equivalent to `obj(index)`.
+ */
+ template <ssize_t D = Dims, typename = enable_if_t<D == 1 || Dynamic>>
+ const T &operator[](ssize_t index) const { return operator()(index); }
+
+ /// Pointer access to the data at the given indices.
+ template <typename... Ix> const T *data(Ix... ix) const { return &operator()(ssize_t(ix)...); }
+
+ /// Returns the item size, i.e. sizeof(T)
+ constexpr static ssize_t itemsize() { return sizeof(T); }
+
+ /// Returns the shape (i.e. size) of dimension `dim`
+ ssize_t shape(ssize_t dim) const { return shape_[(size_t) dim]; }
+
+ /// Returns the number of dimensions of the array
+ ssize_t ndim() const { return dims_; }
+
+ /// Returns the total number of elements in the referenced array, i.e. the product of the shapes
+ template <bool Dyn = Dynamic>
+ enable_if_t<!Dyn, ssize_t> size() const {
+ return std::accumulate(shape_.begin(), shape_.end(), (ssize_t) 1, std::multiplies<ssize_t>());
+ }
+ template <bool Dyn = Dynamic>
+ enable_if_t<Dyn, ssize_t> size() const {
+ return std::accumulate(shape_, shape_ + ndim(), (ssize_t) 1, std::multiplies<ssize_t>());
+ }
+
+ /// Returns the total number of bytes used by the referenced data. Note that the actual span in
+ /// memory may be larger if the referenced array has non-contiguous strides (e.g. for a slice).
+ ssize_t nbytes() const {
+ return size() * itemsize();
+ }
+};
+
+template <typename T, ssize_t Dims>
+class unchecked_mutable_reference : public unchecked_reference<T, Dims> {
+ friend class pybind11::array;
+ using ConstBase = unchecked_reference<T, Dims>;
+ using ConstBase::ConstBase;
+ using ConstBase::Dynamic;
+public:
+ // Bring in const-qualified versions from base class
+ using ConstBase::operator();
+ using ConstBase::operator[];
+
+ /// Mutable, unchecked access to data at the given indices.
+ template <typename... Ix> T& operator()(Ix... index) {
+ static_assert(ssize_t{sizeof...(Ix)} == Dims || Dynamic,
+ "Invalid number of indices for unchecked array reference");
+ return const_cast<T &>(ConstBase::operator()(index...));
+ }
+ /**
+ * Mutable, unchecked access to data at the given index; this operator only participates if the
+ * reference is to a 1-dimensional array (or has runtime dimensions). When present, this is
+ * exactly equivalent to `obj(index)`.
+ */
+ template <ssize_t D = Dims, typename = enable_if_t<D == 1 || Dynamic>>
+ T &operator[](ssize_t index) { return operator()(index); }
+
+ /// Mutable pointer access to the data at the given indices.
+ template <typename... Ix> T *mutable_data(Ix... ix) { return &operator()(ssize_t(ix)...); }
+};
+
+template <typename T, ssize_t Dim>
+struct type_caster<unchecked_reference<T, Dim>> {
+ static_assert(Dim == 0 && Dim > 0 /* always fail */, "unchecked array proxy object is not castable");
+};
+template <typename T, ssize_t Dim>
+struct type_caster<unchecked_mutable_reference<T, Dim>> : type_caster<unchecked_reference<T, Dim>> {};
+
+PYBIND11_NAMESPACE_END(detail)
+
+class dtype : public object {
+public:
+ PYBIND11_OBJECT_DEFAULT(dtype, object, detail::npy_api::get().PyArrayDescr_Check_);
+
+ explicit dtype(const buffer_info &info) {
+ dtype descr(_dtype_from_pep3118()(PYBIND11_STR_TYPE(info.format)));
+ // If info.itemsize == 0, use the value calculated from the format string
+ m_ptr = descr.strip_padding(info.itemsize ? info.itemsize : descr.itemsize()).release().ptr();
+ }
+
+ explicit dtype(const std::string &format) {
+ m_ptr = from_args(pybind11::str(format)).release().ptr();
+ }
+
+ dtype(const char *format) : dtype(std::string(format)) { }
+
+ dtype(list names, list formats, list offsets, ssize_t itemsize) {
+ dict args;
+ args["names"] = names;
+ args["formats"] = formats;
+ args["offsets"] = offsets;
+ args["itemsize"] = pybind11::int_(itemsize);
+ m_ptr = from_args(args).release().ptr();
+ }
+
+ /// This is essentially the same as calling numpy.dtype(args) in Python.
+ static dtype from_args(object args) {
+ PyObject *ptr = nullptr;
+ if (!detail::npy_api::get().PyArray_DescrConverter_(args.ptr(), &ptr) || !ptr)
+ throw error_already_set();
+ return reinterpret_steal<dtype>(ptr);
+ }
+
+ /// Return dtype associated with a C++ type.
+ template <typename T> static dtype of() {
+ return detail::npy_format_descriptor<typename std::remove_cv<T>::type>::dtype();
+ }
+
+ /// Size of the data type in bytes.
+ ssize_t itemsize() const {
+ return detail::array_descriptor_proxy(m_ptr)->elsize;
+ }
+
+ /// Returns true for structured data types.
+ bool has_fields() const {
+ return detail::array_descriptor_proxy(m_ptr)->names != nullptr;
+ }
+
+ /// Single-character type code.
+ char kind() const {
+ return detail::array_descriptor_proxy(m_ptr)->kind;
+ }
+
+private:
+ static object _dtype_from_pep3118() {
+ static PyObject *obj = module_::import("numpy.core._internal")
+ .attr("_dtype_from_pep3118").cast<object>().release().ptr();
+ return reinterpret_borrow<object>(obj);
+ }
+
+ dtype strip_padding(ssize_t itemsize) {
+ // Recursively strip all void fields with empty names that are generated for
+ // padding fields (as of NumPy v1.11).
+ if (!has_fields())
+ return *this;
+
+ struct field_descr { PYBIND11_STR_TYPE name; object format; pybind11::int_ offset; };
+ std::vector<field_descr> field_descriptors;
+
+ for (auto field : attr("fields").attr("items")()) {
+ auto spec = field.cast<tuple>();
+ auto name = spec[0].cast<pybind11::str>();
+ auto format = spec[1].cast<tuple>()[0].cast<dtype>();
+ auto offset = spec[1].cast<tuple>()[1].cast<pybind11::int_>();
+ if (!len(name) && format.kind() == 'V')
+ continue;
+ field_descriptors.push_back({(PYBIND11_STR_TYPE) name, format.strip_padding(format.itemsize()), offset});
+ }
+
+ std::sort(field_descriptors.begin(), field_descriptors.end(),
+ [](const field_descr& a, const field_descr& b) {
+ return a.offset.cast<int>() < b.offset.cast<int>();
+ });
+
+ list names, formats, offsets;
+ for (auto& descr : field_descriptors) {
+ names.append(descr.name);
+ formats.append(descr.format);
+ offsets.append(descr.offset);
+ }
+ return dtype(names, formats, offsets, itemsize);
+ }
+};
+
+class array : public buffer {
+public:
+ PYBIND11_OBJECT_CVT(array, buffer, detail::npy_api::get().PyArray_Check_, raw_array)
+
+ enum {
+ c_style = detail::npy_api::NPY_ARRAY_C_CONTIGUOUS_,
+ f_style = detail::npy_api::NPY_ARRAY_F_CONTIGUOUS_,
+ forcecast = detail::npy_api::NPY_ARRAY_FORCECAST_
+ };
+
+ array() : array(0, static_cast<const double *>(nullptr)) {}
+
+ using ShapeContainer = detail::any_container<ssize_t>;
+ using StridesContainer = detail::any_container<ssize_t>;
+
+ // Constructs an array taking shape/strides from arbitrary container types
+ array(const pybind11::dtype &dt, ShapeContainer shape, StridesContainer strides,
+ const void *ptr = nullptr, handle base = handle()) {
+
+ if (strides->empty())
+ *strides = detail::c_strides(*shape, dt.itemsize());
+
+ auto ndim = shape->size();
+ if (ndim != strides->size())
+ pybind11_fail("NumPy: shape ndim doesn't match strides ndim");
+ auto descr = dt;
+
+ int flags = 0;
+ if (base && ptr) {
+ if (isinstance<array>(base))
+ /* Copy flags from base (except ownership bit) */
+ flags = reinterpret_borrow<array>(base).flags() & ~detail::npy_api::NPY_ARRAY_OWNDATA_;
+ else
+ /* Writable by default, easy to downgrade later on if needed */
+ flags = detail::npy_api::NPY_ARRAY_WRITEABLE_;
+ }
+
+ auto &api = detail::npy_api::get();
+ auto tmp = reinterpret_steal<object>(api.PyArray_NewFromDescr_(
+ api.PyArray_Type_, descr.release().ptr(), (int) ndim,
+ // Use reinterpret_cast for PyPy on Windows (remove if fixed, checked on 7.3.1)
+ reinterpret_cast<Py_intptr_t*>(shape->data()),
+ reinterpret_cast<Py_intptr_t*>(strides->data()),
+ const_cast<void *>(ptr), flags, nullptr));
+ if (!tmp)
+ throw error_already_set();
+ if (ptr) {
+ if (base) {
+ api.PyArray_SetBaseObject_(tmp.ptr(), base.inc_ref().ptr());
+ } else {
+ tmp = reinterpret_steal<object>(api.PyArray_NewCopy_(tmp.ptr(), -1 /* any order */));
+ }
+ }
+ m_ptr = tmp.release().ptr();
+ }
+
+ array(const pybind11::dtype &dt, ShapeContainer shape, const void *ptr = nullptr, handle base = handle())
+ : array(dt, std::move(shape), {}, ptr, base) { }
+
+ template <typename T, typename = detail::enable_if_t<std::is_integral<T>::value && !std::is_same<bool, T>::value>>
+ array(const pybind11::dtype &dt, T count, const void *ptr = nullptr, handle base = handle())
+ : array(dt, {{count}}, ptr, base) { }
+
+ template <typename T>
+ array(ShapeContainer shape, StridesContainer strides, const T *ptr, handle base = handle())
+ : array(pybind11::dtype::of<T>(), std::move(shape), std::move(strides), ptr, base) { }
+
+ template <typename T>
+ array(ShapeContainer shape, const T *ptr, handle base = handle())
+ : array(std::move(shape), {}, ptr, base) { }
+
+ template <typename T>
+ explicit array(ssize_t count, const T *ptr, handle base = handle()) : array({count}, {}, ptr, base) { }
+
+ explicit array(const buffer_info &info, handle base = handle())
+ : array(pybind11::dtype(info), info.shape, info.strides, info.ptr, base) { }
+
+ /// Array descriptor (dtype)
+ pybind11::dtype dtype() const {
+ return reinterpret_borrow<pybind11::dtype>(detail::array_proxy(m_ptr)->descr);
+ }
+
+ /// Total number of elements
+ ssize_t size() const {
+ return std::accumulate(shape(), shape() + ndim(), (ssize_t) 1, std::multiplies<ssize_t>());
+ }
+
+ /// Byte size of a single element
+ ssize_t itemsize() const {
+ return detail::array_descriptor_proxy(detail::array_proxy(m_ptr)->descr)->elsize;
+ }
+
+ /// Total number of bytes
+ ssize_t nbytes() const {
+ return size() * itemsize();
+ }
+
+ /// Number of dimensions
+ ssize_t ndim() const {
+ return detail::array_proxy(m_ptr)->nd;
+ }
+
+ /// Base object
+ object base() const {
+ return reinterpret_borrow<object>(detail::array_proxy(m_ptr)->base);
+ }
+
+ /// Dimensions of the array
+ const ssize_t* shape() const {
+ return detail::array_proxy(m_ptr)->dimensions;
+ }
+
+ /// Dimension along a given axis
+ ssize_t shape(ssize_t dim) const {
+ if (dim >= ndim())
+ fail_dim_check(dim, "invalid axis");
+ return shape()[dim];
+ }
+
+ /// Strides of the array
+ const ssize_t* strides() const {
+ return detail::array_proxy(m_ptr)->strides;
+ }
+
+ /// Stride along a given axis
+ ssize_t strides(ssize_t dim) const {
+ if (dim >= ndim())
+ fail_dim_check(dim, "invalid axis");
+ return strides()[dim];
+ }
+
+ /// Return the NumPy array flags
+ int flags() const {
+ return detail::array_proxy(m_ptr)->flags;
+ }
+
+ /// If set, the array is writeable (otherwise the buffer is read-only)
+ bool writeable() const {
+ return detail::check_flags(m_ptr, detail::npy_api::NPY_ARRAY_WRITEABLE_);
+ }
+
+ /// If set, the array owns the data (will be freed when the array is deleted)
+ bool owndata() const {
+ return detail::check_flags(m_ptr, detail::npy_api::NPY_ARRAY_OWNDATA_);
+ }
+
+ /// Pointer to the contained data. If index is not provided, points to the
+ /// beginning of the buffer. May throw if the index would lead to out of bounds access.
+ template<typename... Ix> const void* data(Ix... index) const {
+ return static_cast<const void *>(detail::array_proxy(m_ptr)->data + offset_at(index...));
+ }
+
+ /// Mutable pointer to the contained data. If index is not provided, points to the
+ /// beginning of the buffer. May throw if the index would lead to out of bounds access.
+ /// May throw if the array is not writeable.
+ template<typename... Ix> void* mutable_data(Ix... index) {
+ check_writeable();
+ return static_cast<void *>(detail::array_proxy(m_ptr)->data + offset_at(index...));
+ }
+
+ /// Byte offset from beginning of the array to a given index (full or partial).
+ /// May throw if the index would lead to out of bounds access.
+ template<typename... Ix> ssize_t offset_at(Ix... index) const {
+ if ((ssize_t) sizeof...(index) > ndim())
+ fail_dim_check(sizeof...(index), "too many indices for an array");
+ return byte_offset(ssize_t(index)...);
+ }
+
+ ssize_t offset_at() const { return 0; }
+
+ /// Item count from beginning of the array to a given index (full or partial).
+ /// May throw if the index would lead to out of bounds access.
+ template<typename... Ix> ssize_t index_at(Ix... index) const {
+ return offset_at(index...) / itemsize();
+ }
+
+ /**
+ * Returns a proxy object that provides access to the array's data without bounds or
+ * dimensionality checking. Will throw if the array is missing the `writeable` flag. Use with
+ * care: the array must not be destroyed or reshaped for the duration of the returned object,
+ * and the caller must take care not to access invalid dimensions or dimension indices.
+ */
+ template <typename T, ssize_t Dims = -1> detail::unchecked_mutable_reference<T, Dims> mutable_unchecked() & {
+ if (Dims >= 0 && ndim() != Dims)
+ throw std::domain_error("array has incorrect number of dimensions: " + std::to_string(ndim()) +
+ "; expected " + std::to_string(Dims));
+ return detail::unchecked_mutable_reference<T, Dims>(mutable_data(), shape(), strides(), ndim());
+ }
+
+ /**
+ * Returns a proxy object that provides const access to the array's data without bounds or
+ * dimensionality checking. Unlike `mutable_unchecked()`, this does not require that the
+ * underlying array have the `writeable` flag. Use with care: the array must not be destroyed or
+ * reshaped for the duration of the returned object, and the caller must take care not to access
+ * invalid dimensions or dimension indices.
+ */
+ template <typename T, ssize_t Dims = -1> detail::unchecked_reference<T, Dims> unchecked() const & {
+ if (Dims >= 0 && ndim() != Dims)
+ throw std::domain_error("array has incorrect number of dimensions: " + std::to_string(ndim()) +
+ "; expected " + std::to_string(Dims));
+ return detail::unchecked_reference<T, Dims>(data(), shape(), strides(), ndim());
+ }
+
+ /// Return a new view with all of the dimensions of length 1 removed
+ array squeeze() {
+ auto& api = detail::npy_api::get();
+ return reinterpret_steal<array>(api.PyArray_Squeeze_(m_ptr));
+ }
+
+ /// Resize array to given shape
+ /// If refcheck is true and more than one reference to this array exists,
+ /// resize will succeed only if it amounts to a reshape, i.e. the total size does not change.
+ void resize(ShapeContainer new_shape, bool refcheck = true) {
+ detail::npy_api::PyArray_Dims d = {
+ // Use reinterpret_cast for PyPy on Windows (remove if fixed, checked on 7.3.1)
+ reinterpret_cast<Py_intptr_t*>(new_shape->data()),
+ int(new_shape->size())
+ };
+ // try to resize; set the ordering param to -1 because it is not used anyway
+ auto new_array = reinterpret_steal<object>(
+ detail::npy_api::get().PyArray_Resize_(m_ptr, &d, int(refcheck), -1)
+ );
+ if (!new_array) throw error_already_set();
+ if (isinstance<array>(new_array)) { *this = std::move(new_array); }
+ }
+
+ /// Ensure that the argument is a NumPy array
+ /// In case of an error, nullptr is returned and the Python error is cleared.
+ static array ensure(handle h, int ExtraFlags = 0) {
+ auto result = reinterpret_steal<array>(raw_array(h.ptr(), ExtraFlags));
+ if (!result)
+ PyErr_Clear();
+ return result;
+ }
+
+protected:
+ template<typename, typename> friend struct detail::npy_format_descriptor;
+
+ void fail_dim_check(ssize_t dim, const std::string& msg) const {
+ throw index_error(msg + ": " + std::to_string(dim) +
+ " (ndim = " + std::to_string(ndim()) + ")");
+ }
+
+ template<typename... Ix> ssize_t byte_offset(Ix... index) const {
+ check_dimensions(index...);
+ return detail::byte_offset_unsafe(strides(), ssize_t(index)...);
+ }
+
+ void check_writeable() const {
+ if (!writeable())
+ throw std::domain_error("array is not writeable");
+ }
+
+ template<typename... Ix> void check_dimensions(Ix... index) const {
+ check_dimensions_impl(ssize_t(0), shape(), ssize_t(index)...);
+ }
+
+ void check_dimensions_impl(ssize_t, const ssize_t*) const { }
+
+ template<typename... Ix> void check_dimensions_impl(ssize_t axis, const ssize_t* shape, ssize_t i, Ix... index) const {
+ if (i >= *shape) {
+ throw index_error(std::string("index ") + std::to_string(i) +
+ " is out of bounds for axis " + std::to_string(axis) +
+ " with size " + std::to_string(*shape));
+ }
+ check_dimensions_impl(axis + 1, shape + 1, index...);
+ }
+
+ /// Create array from any object -- always returns a new reference
+ static PyObject *raw_array(PyObject *ptr, int ExtraFlags = 0) {
+ if (ptr == nullptr) {
+ PyErr_SetString(PyExc_ValueError, "cannot create a pybind11::array from a nullptr");
+ return nullptr;
+ }
+ return detail::npy_api::get().PyArray_FromAny_(
+ ptr, nullptr, 0, 0, detail::npy_api::NPY_ARRAY_ENSUREARRAY_ | ExtraFlags, nullptr);
+ }
+};
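+
+// Construction sketch (variable names are illustrative assumptions): build a 10x10
+// double array from an existing C++ buffer; without a `base` handle the data is
+// copied into the new array, so the vector may go out of scope afterwards.
+//
+//   std::vector<double> v(100, 0.0);
+//   pybind11::array a({10, 10},                                  // shape
+//                     {10 * sizeof(double), sizeof(double)},     // strides (bytes)
+//                     v.data());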
+
+template <typename T, int ExtraFlags = array::forcecast> class array_t : public array {
+private:
+ struct private_ctor {};
+ // Delegating constructor needed when both moving and accessing in the same constructor
+ array_t(private_ctor, ShapeContainer &&shape, StridesContainer &&strides, const T *ptr, handle base)
+ : array(std::move(shape), std::move(strides), ptr, base) {}
+public:
+ static_assert(!detail::array_info<T>::is_array, "Array types cannot be used with array_t");
+
+ using value_type = T;
+
+ array_t() : array(0, static_cast<const T *>(nullptr)) {}
+ array_t(handle h, borrowed_t) : array(h, borrowed_t{}) { }
+ array_t(handle h, stolen_t) : array(h, stolen_t{}) { }
+
+ PYBIND11_DEPRECATED("Use array_t<T>::ensure() instead")
+ array_t(handle h, bool is_borrowed) : array(raw_array_t(h.ptr()), stolen_t{}) {
+ if (!m_ptr) PyErr_Clear();
+ if (!is_borrowed) Py_XDECREF(h.ptr());
+ }
+
+ array_t(const object &o) : array(raw_array_t(o.ptr()), stolen_t{}) {
+ if (!m_ptr) throw error_already_set();
+ }
+
+ explicit array_t(const buffer_info& info, handle base = handle()) : array(info, base) { }
+
+ array_t(ShapeContainer shape, StridesContainer strides, const T *ptr = nullptr, handle base = handle())
+ : array(std::move(shape), std::move(strides), ptr, base) { }
+
+ explicit array_t(ShapeContainer shape, const T *ptr = nullptr, handle base = handle())
+ : array_t(private_ctor{}, std::move(shape),
+ ExtraFlags & f_style
+ ? detail::f_strides(*shape, itemsize())
+ : detail::c_strides(*shape, itemsize()),
+ ptr, base) { }
+
+ explicit array_t(ssize_t count, const T *ptr = nullptr, handle base = handle())
+ : array({count}, {}, ptr, base) { }
+
+ constexpr ssize_t itemsize() const {
+ return sizeof(T);
+ }
+
+ template<typename... Ix> ssize_t index_at(Ix... index) const {
+ return offset_at(index...) / itemsize();
+ }
+
+ template<typename... Ix> const T* data(Ix... index) const {
+ return static_cast<const T*>(array::data(index...));
+ }
+
+ template<typename... Ix> T* mutable_data(Ix... index) {
+ return static_cast<T*>(array::mutable_data(index...));
+ }
+
+ // Reference to element at a given index
+ template<typename... Ix> const T& at(Ix... index) const {
+ if ((ssize_t) sizeof...(index) != ndim())
+ fail_dim_check(sizeof...(index), "index dimension mismatch");
+ return *(static_cast<const T*>(array::data()) + byte_offset(ssize_t(index)...) / itemsize());
+ }
+
+ // Mutable reference to element at a given index
+ template<typename... Ix> T& mutable_at(Ix... index) {
+ if ((ssize_t) sizeof...(index) != ndim())
+ fail_dim_check(sizeof...(index), "index dimension mismatch");
+ return *(static_cast<T*>(array::mutable_data()) + byte_offset(ssize_t(index)...) / itemsize());
+ }
+
+ /**
+ * Returns a proxy object that provides access to the array's data without bounds or
+ * dimensionality checking. Will throw if the array is missing the `writeable` flag. Use with
+ * care: the array must not be destroyed or reshaped for the duration of the returned object,
+ * and the caller must take care not to access invalid dimensions or dimension indices.
+ */
+ template <ssize_t Dims = -1> detail::unchecked_mutable_reference<T, Dims> mutable_unchecked() & {
+ return array::mutable_unchecked<T, Dims>();
+ }
+
+ /**
+ * Returns a proxy object that provides const access to the array's data without bounds or
+ * dimensionality checking. Unlike `mutable_unchecked()`, this does not require that the underlying
+ * array have the `writeable` flag. Use with care: the array must not be destroyed or reshaped
+ * for the duration of the returned object, and the caller must take care not to access invalid
+ * dimensions or dimension indices.
+ */
+ template <ssize_t Dims = -1> detail::unchecked_reference<T, Dims> unchecked() const & {
+ return array::unchecked<T, Dims>();
+ }
+
+ /// Ensure that the argument is a NumPy array of the correct dtype (and if not, try to convert
+ /// it). In case of an error, nullptr is returned and the Python error is cleared.
+ static array_t ensure(handle h) {
+ auto result = reinterpret_steal<array_t>(raw_array_t(h.ptr()));
+ if (!result)
+ PyErr_Clear();
+ return result;
+ }
+
+ static bool check_(handle h) {
+ const auto &api = detail::npy_api::get();
+ return api.PyArray_Check_(h.ptr())
+ && api.PyArray_EquivTypes_(detail::array_proxy(h.ptr())->descr, dtype::of<T>().ptr())
+ && detail::check_flags(h.ptr(), ExtraFlags & (array::c_style | array::f_style));
+ }
+
+protected:
+ /// Create array from any object -- always returns a new reference
+ static PyObject *raw_array_t(PyObject *ptr) {
+ if (ptr == nullptr) {
+ PyErr_SetString(PyExc_ValueError, "cannot create a pybind11::array_t from a nullptr");
+ return nullptr;
+ }
+ return detail::npy_api::get().PyArray_FromAny_(
+ ptr, dtype::of<T>().release().ptr(), 0, 0,
+ detail::npy_api::NPY_ARRAY_ENSUREARRAY_ | ExtraFlags, nullptr);
+ }
+};
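+
+// A binding sketch (the module `m` and the function name are assumptions): accept a
+// 1-D NumPy array of doubles (converting if necessary via `forcecast`) and sum it
+// using the unchecked proxy.
+//
+//   m.def("sum_array", [](pybind11::array_t<double> a) {
+//     auto r = a.unchecked<1>();
+//     double s = 0.0;
+//     for (pybind11::ssize_t i = 0; i < r.shape(0); ++i)
+//       s += r(i);
+//     return s;
+//   });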
+
+template <typename T>
+struct format_descriptor<T, detail::enable_if_t<detail::is_pod_struct<T>::value>> {
+ static std::string format() {
+ return detail::npy_format_descriptor<typename std::remove_cv<T>::type>::format();
+ }
+};
+
+template <size_t N> struct format_descriptor<char[N]> {
+ static std::string format() { return std::to_string(N) + "s"; }
+};
+template <size_t N> struct format_descriptor<std::array<char, N>> {
+ static std::string format() { return std::to_string(N) + "s"; }
+};
+
+template <typename T>
+struct format_descriptor<T, detail::enable_if_t<std::is_enum<T>::value>> {
+ static std::string format() {
+ return format_descriptor<
+ typename std::remove_cv<typename std::underlying_type<T>::type>::type>::format();
+ }
+};
+
+template <typename T>
+struct format_descriptor<T, detail::enable_if_t<detail::array_info<T>::is_array>> {
+ static std::string format() {
+ using namespace detail;
+ static constexpr auto extents = _("(") + array_info<T>::extents + _(")");
+ return extents.text + format_descriptor<remove_all_extents_t<T>>::format();
+ }
+};
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+template <typename T, int ExtraFlags>
+struct pyobject_caster<array_t<T, ExtraFlags>> {
+ using type = array_t<T, ExtraFlags>;
+
+ bool load(handle src, bool convert) {
+ if (!convert && !type::check_(src))
+ return false;
+ value = type::ensure(src);
+ return static_cast<bool>(value);
+ }
+
+ static handle cast(const handle &src, return_value_policy /* policy */, handle /* parent */) {
+ return src.inc_ref();
+ }
+ PYBIND11_TYPE_CASTER(type, handle_type_name<type>::name);
+};
+
+template <typename T>
+struct compare_buffer_info<T, detail::enable_if_t<detail::is_pod_struct<T>::value>> {
+ static bool compare(const buffer_info& b) {
+ return npy_api::get().PyArray_EquivTypes_(dtype::of<T>().ptr(), dtype(b).ptr());
+ }
+};
+
+template <typename T, typename = void>
+struct npy_format_descriptor_name;
+
+template <typename T>
+struct npy_format_descriptor_name<T, enable_if_t<std::is_integral<T>::value>> {
+ static constexpr auto name = _<std::is_same<T, bool>::value>(
+ _("bool"), _<std::is_signed<T>::value>("numpy.int", "numpy.uint") + _<sizeof(T)*8>()
+ );
+};
+
+template <typename T>
+struct npy_format_descriptor_name<T, enable_if_t<std::is_floating_point<T>::value>> {
+ static constexpr auto name = _<std::is_same<T, float>::value || std::is_same<T, double>::value>(
+ _("numpy.float") + _<sizeof(T)*8>(), _("numpy.longdouble")
+ );
+};
+
+template <typename T>
+struct npy_format_descriptor_name<T, enable_if_t<is_complex<T>::value>> {
+ static constexpr auto name = _<std::is_same<typename T::value_type, float>::value
+ || std::is_same<typename T::value_type, double>::value>(
+ _("numpy.complex") + _<sizeof(typename T::value_type)*16>(), _("numpy.longcomplex")
+ );
+};
+
+template <typename T>
+struct npy_format_descriptor<T, enable_if_t<satisfies_any_of<T, std::is_arithmetic, is_complex>::value>>
+ : npy_format_descriptor_name<T> {
+private:
+ // NB: the order here must match the one in common.h
+ constexpr static const int values[15] = {
+ npy_api::NPY_BOOL_,
+ npy_api::NPY_BYTE_, npy_api::NPY_UBYTE_, npy_api::NPY_INT16_, npy_api::NPY_UINT16_,
+ npy_api::NPY_INT32_, npy_api::NPY_UINT32_, npy_api::NPY_INT64_, npy_api::NPY_UINT64_,
+ npy_api::NPY_FLOAT_, npy_api::NPY_DOUBLE_, npy_api::NPY_LONGDOUBLE_,
+ npy_api::NPY_CFLOAT_, npy_api::NPY_CDOUBLE_, npy_api::NPY_CLONGDOUBLE_
+ };
+
+public:
+ static constexpr int value = values[detail::is_fmt_numeric<T>::index];
+
+ static pybind11::dtype dtype() {
+ if (auto ptr = npy_api::get().PyArray_DescrFromType_(value))
+ return reinterpret_steal<pybind11::dtype>(ptr);
+ pybind11_fail("Unsupported buffer format!");
+ }
+};
+
+#define PYBIND11_DECL_CHAR_FMT \
+ static constexpr auto name = _("S") + _<N>(); \
+ static pybind11::dtype dtype() { return pybind11::dtype(std::string("S") + std::to_string(N)); }
+template <size_t N> struct npy_format_descriptor<char[N]> { PYBIND11_DECL_CHAR_FMT };
+template <size_t N> struct npy_format_descriptor<std::array<char, N>> { PYBIND11_DECL_CHAR_FMT };
+#undef PYBIND11_DECL_CHAR_FMT
+
+template<typename T> struct npy_format_descriptor<T, enable_if_t<array_info<T>::is_array>> {
+private:
+ using base_descr = npy_format_descriptor<typename array_info<T>::type>;
+public:
+ static_assert(!array_info<T>::is_empty, "Zero-sized arrays are not supported");
+
+ static constexpr auto name = _("(") + array_info<T>::extents + _(")") + base_descr::name;
+ static pybind11::dtype dtype() {
+ list shape;
+ array_info<T>::append_extents(shape);
+ return pybind11::dtype::from_args(pybind11::make_tuple(base_descr::dtype(), shape));
+ }
+};
+
+template<typename T> struct npy_format_descriptor<T, enable_if_t<std::is_enum<T>::value>> {
+private:
+ using base_descr = npy_format_descriptor<typename std::underlying_type<T>::type>;
+public:
+ static constexpr auto name = base_descr::name;
+ static pybind11::dtype dtype() { return base_descr::dtype(); }
+};
+
+struct field_descriptor {
+ const char *name;
+ ssize_t offset;
+ ssize_t size;
+ std::string format;
+ dtype descr;
+};
+
+inline PYBIND11_NOINLINE void register_structured_dtype(
+ any_container<field_descriptor> fields,
+ const std::type_info& tinfo, ssize_t itemsize,
+ bool (*direct_converter)(PyObject *, void *&)) {
+
+ auto& numpy_internals = get_numpy_internals();
+ if (numpy_internals.get_type_info(tinfo, false))
+ pybind11_fail("NumPy: dtype is already registered");
+
+ // Use ordered fields because order matters as of NumPy 1.14:
+ // https://docs.scipy.org/doc/numpy/release.html#multiple-field-indexing-assignment-of-structured-arrays
+ std::vector<field_descriptor> ordered_fields(std::move(fields));
+ std::sort(ordered_fields.begin(), ordered_fields.end(),
+ [](const field_descriptor &a, const field_descriptor &b) { return a.offset < b.offset; });
+
+ list names, formats, offsets;
+ for (auto& field : ordered_fields) {
+ if (!field.descr)
+ pybind11_fail(std::string("NumPy: unsupported field dtype: `") +
+ field.name + "` @ " + tinfo.name());
+ names.append(PYBIND11_STR_TYPE(field.name));
+ formats.append(field.descr);
+ offsets.append(pybind11::int_(field.offset));
+ }
+ auto dtype_ptr = pybind11::dtype(names, formats, offsets, itemsize).release().ptr();
+
+ // There is an existing bug in NumPy (as of v1.11): trailing bytes are
+ // not encoded explicitly into the format string. This will supposedly
+ // get fixed in v1.12; for further details, see these:
+ // - https://github.com/numpy/numpy/issues/7797
+ // - https://github.com/numpy/numpy/pull/7798
+ // Because of this, we won't use numpy's logic to generate buffer format
+ // strings and will just do it ourselves.
+ ssize_t offset = 0;
+ std::ostringstream oss;
+ // mark the structure as unaligned with '^', because numpy and C++ don't
+ // always agree about alignment (particularly for complex), and we're
+ // explicitly listing all our padding. This depends on none of the fields
+ // overriding the endianness. Putting the ^ in front of individual fields
+ // isn't guaranteed to work due to https://github.com/numpy/numpy/issues/9049
+ oss << "^T{";
+ for (auto& field : ordered_fields) {
+ if (field.offset > offset)
+ oss << (field.offset - offset) << 'x';
+ oss << field.format << ':' << field.name << ':';
+ offset = field.offset + field.size;
+ }
+ if (itemsize > offset)
+ oss << (itemsize - offset) << 'x';
+ oss << '}';
+ auto format_str = oss.str();
+
+ // Sanity check: verify that NumPy properly parses our buffer format string
+ auto& api = npy_api::get();
+ auto arr = array(buffer_info(nullptr, itemsize, format_str, 1));
+ if (!api.PyArray_EquivTypes_(dtype_ptr, arr.dtype().ptr()))
+ pybind11_fail("NumPy: invalid buffer descriptor!");
+
+ auto tindex = std::type_index(tinfo);
+ numpy_internals.registered_dtypes[tindex] = { dtype_ptr, format_str };
+ get_internals().direct_conversions[tindex].push_back(direct_converter);
+}
+
+template <typename T, typename SFINAE> struct npy_format_descriptor {
+ static_assert(is_pod_struct<T>::value, "Attempt to use a non-POD or unimplemented POD type as a numpy dtype");
+
+ static constexpr auto name = make_caster<T>::name;
+
+ static pybind11::dtype dtype() {
+ return reinterpret_borrow<pybind11::dtype>(dtype_ptr());
+ }
+
+ static std::string format() {
+ static auto format_str = get_numpy_internals().get_type_info<T>(true)->format_str;
+ return format_str;
+ }
+
+ static void register_dtype(any_container<field_descriptor> fields) {
+ register_structured_dtype(std::move(fields), typeid(typename std::remove_cv<T>::type),
+ sizeof(T), &direct_converter);
+ }
+
+private:
+ static PyObject* dtype_ptr() {
+ static PyObject* ptr = get_numpy_internals().get_type_info<T>(true)->dtype_ptr;
+ return ptr;
+ }
+
+ static bool direct_converter(PyObject *obj, void*& value) {
+ auto& api = npy_api::get();
+ if (!PyObject_TypeCheck(obj, api.PyVoidArrType_Type_))
+ return false;
+ if (auto descr = reinterpret_steal<object>(api.PyArray_DescrFromScalar_(obj))) {
+ if (api.PyArray_EquivTypes_(dtype_ptr(), descr.ptr())) {
+ value = ((PyVoidScalarObject_Proxy *) obj)->obval;
+ return true;
+ }
+ }
+ return false;
+ }
+};
+
+#ifdef __CLION_IDE__ // replace heavy macro with dummy code for the IDE (doesn't affect code)
+# define PYBIND11_NUMPY_DTYPE(Type, ...) ((void)0)
+# define PYBIND11_NUMPY_DTYPE_EX(Type, ...) ((void)0)
+#else
+
+#define PYBIND11_FIELD_DESCRIPTOR_EX(T, Field, Name) \
+ ::pybind11::detail::field_descriptor { \
+ Name, offsetof(T, Field), sizeof(decltype(std::declval<T>().Field)), \
+ ::pybind11::format_descriptor<decltype(std::declval<T>().Field)>::format(), \
+ ::pybind11::detail::npy_format_descriptor<decltype(std::declval<T>().Field)>::dtype() \
+ }
+
+// Extract name, offset and format descriptor for a struct field
+#define PYBIND11_FIELD_DESCRIPTOR(T, Field) PYBIND11_FIELD_DESCRIPTOR_EX(T, Field, #Field)
+
+// The main idea of this macro is borrowed from https://github.com/swansontec/map-macro
+// (C) William Swanson, Paul Fultz
+#define PYBIND11_EVAL0(...) __VA_ARGS__
+#define PYBIND11_EVAL1(...) PYBIND11_EVAL0 (PYBIND11_EVAL0 (PYBIND11_EVAL0 (__VA_ARGS__)))
+#define PYBIND11_EVAL2(...) PYBIND11_EVAL1 (PYBIND11_EVAL1 (PYBIND11_EVAL1 (__VA_ARGS__)))
+#define PYBIND11_EVAL3(...) PYBIND11_EVAL2 (PYBIND11_EVAL2 (PYBIND11_EVAL2 (__VA_ARGS__)))
+#define PYBIND11_EVAL4(...) PYBIND11_EVAL3 (PYBIND11_EVAL3 (PYBIND11_EVAL3 (__VA_ARGS__)))
+#define PYBIND11_EVAL(...) PYBIND11_EVAL4 (PYBIND11_EVAL4 (PYBIND11_EVAL4 (__VA_ARGS__)))
+#define PYBIND11_MAP_END(...)
+#define PYBIND11_MAP_OUT
+#define PYBIND11_MAP_COMMA ,
+#define PYBIND11_MAP_GET_END() 0, PYBIND11_MAP_END
+#define PYBIND11_MAP_NEXT0(test, next, ...) next PYBIND11_MAP_OUT
+#define PYBIND11_MAP_NEXT1(test, next) PYBIND11_MAP_NEXT0 (test, next, 0)
+#define PYBIND11_MAP_NEXT(test, next) PYBIND11_MAP_NEXT1 (PYBIND11_MAP_GET_END test, next)
+#if defined(_MSC_VER) && !defined(__clang__) // MSVC is not as eager to expand macros, hence this workaround
+#define PYBIND11_MAP_LIST_NEXT1(test, next) \
+ PYBIND11_EVAL0 (PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0))
+#else
+#define PYBIND11_MAP_LIST_NEXT1(test, next) \
+ PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0)
+#endif
+#define PYBIND11_MAP_LIST_NEXT(test, next) \
+ PYBIND11_MAP_LIST_NEXT1 (PYBIND11_MAP_GET_END test, next)
+#define PYBIND11_MAP_LIST0(f, t, x, peek, ...) \
+ f(t, x) PYBIND11_MAP_LIST_NEXT (peek, PYBIND11_MAP_LIST1) (f, t, peek, __VA_ARGS__)
+#define PYBIND11_MAP_LIST1(f, t, x, peek, ...) \
+ f(t, x) PYBIND11_MAP_LIST_NEXT (peek, PYBIND11_MAP_LIST0) (f, t, peek, __VA_ARGS__)
+// PYBIND11_MAP_LIST(f, t, a1, a2, ...) expands to f(t, a1), f(t, a2), ...
+#define PYBIND11_MAP_LIST(f, t, ...) \
+ PYBIND11_EVAL (PYBIND11_MAP_LIST1 (f, t, __VA_ARGS__, (), 0))
+
+#define PYBIND11_NUMPY_DTYPE(Type, ...) \
+ ::pybind11::detail::npy_format_descriptor<Type>::register_dtype \
+ (::std::vector<::pybind11::detail::field_descriptor> \
+ {PYBIND11_MAP_LIST (PYBIND11_FIELD_DESCRIPTOR, Type, __VA_ARGS__)})
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define PYBIND11_MAP2_LIST_NEXT1(test, next) \
+ PYBIND11_EVAL0 (PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0))
+#else
+#define PYBIND11_MAP2_LIST_NEXT1(test, next) \
+ PYBIND11_MAP_NEXT0 (test, PYBIND11_MAP_COMMA next, 0)
+#endif
+#define PYBIND11_MAP2_LIST_NEXT(test, next) \
+ PYBIND11_MAP2_LIST_NEXT1 (PYBIND11_MAP_GET_END test, next)
+#define PYBIND11_MAP2_LIST0(f, t, x1, x2, peek, ...) \
+ f(t, x1, x2) PYBIND11_MAP2_LIST_NEXT (peek, PYBIND11_MAP2_LIST1) (f, t, peek, __VA_ARGS__)
+#define PYBIND11_MAP2_LIST1(f, t, x1, x2, peek, ...) \
+ f(t, x1, x2) PYBIND11_MAP2_LIST_NEXT (peek, PYBIND11_MAP2_LIST0) (f, t, peek, __VA_ARGS__)
+// PYBIND11_MAP2_LIST(f, t, a1, a2, ...) expands to f(t, a1, a2), f(t, a3, a4), ...
+#define PYBIND11_MAP2_LIST(f, t, ...) \
+ PYBIND11_EVAL (PYBIND11_MAP2_LIST1 (f, t, __VA_ARGS__, (), 0))
+
+#define PYBIND11_NUMPY_DTYPE_EX(Type, ...) \
+ ::pybind11::detail::npy_format_descriptor<Type>::register_dtype \
+ (::std::vector<::pybind11::detail::field_descriptor> \
+ {PYBIND11_MAP2_LIST (PYBIND11_FIELD_DESCRIPTOR_EX, Type, __VA_ARGS__)})
+
+#endif // __CLION_IDE__
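+// A minimal usage sketch for the dtype-registration macros above (added here for
+// illustration; `Vec2` and its fields are hypothetical names, not part of this header):
+//
+//     struct Vec2 { double x, y; };
+//     PYBIND11_NUMPY_DTYPE(Vec2, x, y);               // field names taken verbatim
+//     PYBIND11_NUMPY_DTYPE_EX(Vec2, x, "u", y, "v");  // or: explicit Python-side names
+//
+// Once registered, `py::array_t<Vec2>` can be exchanged with Python as a structured
+// NumPy array whose dtype carries the listed fields.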
+
+class common_iterator {
+public:
+ using container_type = std::vector<ssize_t>;
+ using value_type = container_type::value_type;
+ using size_type = container_type::size_type;
+
+ common_iterator() : p_ptr(0), m_strides() {}
+
+ common_iterator(void* ptr, const container_type& strides, const container_type& shape)
+ : p_ptr(reinterpret_cast<char*>(ptr)), m_strides(strides.size()) {
+ m_strides.back() = static_cast<value_type>(strides.back());
+ for (size_type i = m_strides.size() - 1; i != 0; --i) {
+ size_type j = i - 1;
+ auto s = static_cast<value_type>(shape[i]);
+ m_strides[j] = strides[j] + m_strides[i] - strides[i] * s;
+ }
+ }
+
+ void increment(size_type dim) {
+ p_ptr += m_strides[dim];
+ }
+
+ void* data() const {
+ return p_ptr;
+ }
+
+private:
+ char* p_ptr;
+ container_type m_strides;
+};
+
+template <size_t N> class multi_array_iterator {
+public:
+ using container_type = std::vector<ssize_t>;
+
+ multi_array_iterator(const std::array<buffer_info, N> &buffers,
+ const container_type &shape)
+ : m_shape(shape.size()), m_index(shape.size(), 0),
+ m_common_iterator() {
+
+ // Manual copy to avoid conversion warning if using std::copy
+ for (size_t i = 0; i < shape.size(); ++i)
+ m_shape[i] = shape[i];
+
+ container_type strides(shape.size());
+ for (size_t i = 0; i < N; ++i)
+ init_common_iterator(buffers[i], shape, m_common_iterator[i], strides);
+ }
+
+ multi_array_iterator& operator++() {
+ for (size_t j = m_index.size(); j != 0; --j) {
+ size_t i = j - 1;
+ if (++m_index[i] != m_shape[i]) {
+ increment_common_iterator(i);
+ break;
+ } else {
+ m_index[i] = 0;
+ }
+ }
+ return *this;
+ }
+
+ template <size_t K, class T = void> T* data() const {
+ return reinterpret_cast<T*>(m_common_iterator[K].data());
+ }
+
+private:
+
+ using common_iter = common_iterator;
+
+ void init_common_iterator(const buffer_info &buffer,
+ const container_type &shape,
+ common_iter &iterator,
+ container_type &strides) {
+ auto buffer_shape_iter = buffer.shape.rbegin();
+ auto buffer_strides_iter = buffer.strides.rbegin();
+ auto shape_iter = shape.rbegin();
+ auto strides_iter = strides.rbegin();
+
+ while (buffer_shape_iter != buffer.shape.rend()) {
+ if (*shape_iter == *buffer_shape_iter)
+ *strides_iter = *buffer_strides_iter;
+ else
+ *strides_iter = 0;
+
+ ++buffer_shape_iter;
+ ++buffer_strides_iter;
+ ++shape_iter;
+ ++strides_iter;
+ }
+
+ std::fill(strides_iter, strides.rend(), 0);
+ iterator = common_iter(buffer.ptr, strides, shape);
+ }
+
+ void increment_common_iterator(size_t dim) {
+ for (auto &iter : m_common_iterator)
+ iter.increment(dim);
+ }
+
+ container_type m_shape;
+ container_type m_index;
+ std::array<common_iter, N> m_common_iterator;
+};
+
+enum class broadcast_trivial { non_trivial, c_trivial, f_trivial };
+
+// Populates the shape and number of dimensions for the set of buffers. Returns a broadcast_trivial
+// enum value indicating whether the broadcast is "trivial", i.e. whether each buffer is either a
+// singleton or a full-size, C-contiguous (`c_trivial`) or Fortran-contiguous (`f_trivial`) storage
+// buffer; returns `non_trivial` otherwise.
+template <size_t N>
+broadcast_trivial broadcast(const std::array<buffer_info, N> &buffers, ssize_t &ndim, std::vector<ssize_t> &shape) {
+ ndim = std::accumulate(buffers.begin(), buffers.end(), ssize_t(0), [](ssize_t res, const buffer_info &buf) {
+ return std::max(res, buf.ndim);
+ });
+
+ shape.clear();
+ shape.resize((size_t) ndim, 1);
+
+ // Figure out the output size, and make sure all input arrays conform (i.e. are either size 1 or
+ // the full size).
+ for (size_t i = 0; i < N; ++i) {
+ auto res_iter = shape.rbegin();
+ auto end = buffers[i].shape.rend();
+ for (auto shape_iter = buffers[i].shape.rbegin(); shape_iter != end; ++shape_iter, ++res_iter) {
+ const auto &dim_size_in = *shape_iter;
+ auto &dim_size_out = *res_iter;
+
+ // Each input dimension can either be 1 or `n`, but `n` values must match across buffers
+ if (dim_size_out == 1)
+ dim_size_out = dim_size_in;
+ else if (dim_size_in != 1 && dim_size_in != dim_size_out)
+ pybind11_fail("pybind11::vectorize: incompatible size/dimension of inputs!");
+ }
+ }
+
+ bool trivial_broadcast_c = true;
+ bool trivial_broadcast_f = true;
+ for (size_t i = 0; i < N && (trivial_broadcast_c || trivial_broadcast_f); ++i) {
+ if (buffers[i].size == 1)
+ continue;
+
+ // Require the same number of dimensions:
+ if (buffers[i].ndim != ndim)
+ return broadcast_trivial::non_trivial;
+
+ // Require all dimensions be full-size:
+ if (!std::equal(buffers[i].shape.cbegin(), buffers[i].shape.cend(), shape.cbegin()))
+ return broadcast_trivial::non_trivial;
+
+ // Check for C contiguity (but only if previous inputs were also C contiguous)
+ if (trivial_broadcast_c) {
+ ssize_t expect_stride = buffers[i].itemsize;
+ auto end = buffers[i].shape.crend();
+ for (auto shape_iter = buffers[i].shape.crbegin(), stride_iter = buffers[i].strides.crbegin();
+ trivial_broadcast_c && shape_iter != end; ++shape_iter, ++stride_iter) {
+ if (expect_stride == *stride_iter)
+ expect_stride *= *shape_iter;
+ else
+ trivial_broadcast_c = false;
+ }
+ }
+
+ // Check for Fortran contiguity (if previous inputs were also F contiguous)
+ if (trivial_broadcast_f) {
+ ssize_t expect_stride = buffers[i].itemsize;
+ auto end = buffers[i].shape.cend();
+ for (auto shape_iter = buffers[i].shape.cbegin(), stride_iter = buffers[i].strides.cbegin();
+ trivial_broadcast_f && shape_iter != end; ++shape_iter, ++stride_iter) {
+ if (expect_stride == *stride_iter)
+ expect_stride *= *shape_iter;
+ else
+ trivial_broadcast_f = false;
+ }
+ }
+ }
+
+ return
+ trivial_broadcast_c ? broadcast_trivial::c_trivial :
+ trivial_broadcast_f ? broadcast_trivial::f_trivial :
+ broadcast_trivial::non_trivial;
+}
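+// Worked example of the logic above: input shapes (3, 1) and (1, 4) broadcast to an
+// output shape of (3, 4) and are reported as non_trivial (neither buffer is full-size);
+// a C-contiguous (3, 4) buffer combined with a size-1 singleton buffer also yields the
+// output shape (3, 4) but is reported as c_trivial.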
+
+template <typename T>
+struct vectorize_arg {
+ static_assert(!std::is_rvalue_reference<T>::value, "Functions with rvalue reference arguments cannot be vectorized");
+ // The wrapped function gets called with this type:
+ using call_type = remove_reference_t<T>;
+ // Is this a vectorized argument?
+ static constexpr bool vectorize =
+ satisfies_any_of<call_type, std::is_arithmetic, is_complex, is_pod>::value &&
+ satisfies_none_of<call_type, std::is_pointer, std::is_array, is_std_array, std::is_enum>::value &&
+ (!std::is_reference<T>::value ||
+ (std::is_lvalue_reference<T>::value && std::is_const<call_type>::value));
+ // Accept this type: an array for vectorized types, otherwise the type as-is:
+ using type = conditional_t<vectorize, array_t<remove_cv_t<call_type>, array::forcecast>, T>;
+};
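+// Example: for a wrapped signature `double f(double, const std::string &)`, the first
+// parameter satisfies the `vectorize` predicate and is accepted as
+// array_t<double, array::forcecast>, while the std::string reference is not
+// vectorizable and is passed through unchanged.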
+
+
+// py::vectorize when a return type is present
+template <typename Func, typename Return, typename... Args>
+struct vectorize_returned_array {
+ using Type = array_t<Return>;
+
+ static Type create(broadcast_trivial trivial, const std::vector<ssize_t> &shape) {
+ if (trivial == broadcast_trivial::f_trivial)
+ return array_t<Return, array::f_style>(shape);
+ else
+ return array_t<Return>(shape);
+ }
+
+ static Return *mutable_data(Type &array) {
+ return array.mutable_data();
+ }
+
+ static Return call(Func &f, Args &... args) {
+ return f(args...);
+ }
+
+ static void call(Return *out, size_t i, Func &f, Args &... args) {
+ out[i] = f(args...);
+ }
+};
+
+// py::vectorize when a return type is not present
+template <typename Func, typename... Args>
+struct vectorize_returned_array<Func, void, Args...> {
+ using Type = none;
+
+ static Type create(broadcast_trivial, const std::vector<ssize_t> &) {
+ return none();
+ }
+
+ static void *mutable_data(Type &) {
+ return nullptr;
+ }
+
+ static detail::void_type call(Func &f, Args &... args) {
+ f(args...);
+ return {};
+ }
+
+ static void call(void *, size_t, Func &f, Args &... args) {
+ f(args...);
+ }
+};
+
+
+template <typename Func, typename Return, typename... Args>
+struct vectorize_helper {
+
+// NVCC for some reason breaks if NVectorized is private
+#ifdef __CUDACC__
+public:
+#else
+private:
+#endif
+
+ static constexpr size_t N = sizeof...(Args);
+ static constexpr size_t NVectorized = constexpr_sum(vectorize_arg<Args>::vectorize...);
+ static_assert(NVectorized >= 1,
+ "pybind11::vectorize(...) requires a function with at least one vectorizable argument");
+
+public:
+ template <typename T>
+ explicit vectorize_helper(T &&f) : f(std::forward<T>(f)) { }
+
+ object operator()(typename vectorize_arg<Args>::type... args) {
+ return run(args...,
+ make_index_sequence<N>(),
+ select_indices<vectorize_arg<Args>::vectorize...>(),
+ make_index_sequence<NVectorized>());
+ }
+
+private:
+ remove_reference_t<Func> f;
+
+ // Internal compiler error in MSVC 19.16.27025.1 (Visual Studio 2017 15.9.4) when compiling with the
+ // "/permissive-" flag, if arg_call_types is manually inlined.
+ using arg_call_types = std::tuple<typename vectorize_arg<Args>::call_type...>;
+ template <size_t Index> using param_n_t = typename std::tuple_element<Index, arg_call_types>::type;
+
+ using returned_array = vectorize_returned_array<Func, Return, Args...>;
+
+ // Runs a vectorized function given arguments tuple and three index sequences:
+ // - Index is the full set of 0 ... (N-1) argument indices;
+ // - VIndex is the subset of argument indices with vectorized parameters, letting us access
+ // vectorized arguments (anything not in this sequence is passed through)
+ // - BIndex is an incremental sequence (beginning at 0) of the same size as VIndex, so that
+ // we can store vectorized buffer_infos in an array (argument VIndex has its buffer at
+ // index BIndex in the array).
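+ // Example: for Args = (double, std::string, double) this gives N = 3, NVectorized = 2,
+ // Index = <0, 1, 2>, VIndex = <0, 2> (the vectorized double arguments) and
+ // BIndex = <0, 1> (their slots in the buffer_info array).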
+ template <size_t... Index, size_t... VIndex, size_t... BIndex> object run(
+ typename vectorize_arg<Args>::type &...args,
+ index_sequence<Index...> i_seq, index_sequence<VIndex...> vi_seq, index_sequence<BIndex...> bi_seq) {
+
+ // Pointers to values the function was called with; the vectorized ones set here will start
+ // out as array_t<T> pointers, but they will be changed to T pointers before we
+ // call the wrapped function. Non-vectorized pointers are left as-is.
+ std::array<void *, N> params{{ &args... }};
+
+ // The array of `buffer_info`s of vectorized arguments:
+ std::array<buffer_info, NVectorized> buffers{{ reinterpret_cast<array *>(params[VIndex])->request()... }};
+
+ /* Determine dimensions parameters of output array */
+ ssize_t nd = 0;
+ std::vector<ssize_t> shape(0);
+ auto trivial = broadcast(buffers, nd, shape);
+ auto ndim = (size_t) nd;
+
+ size_t size = std::accumulate(shape.begin(), shape.end(), (size_t) 1, std::multiplies<size_t>());
+
+ // If all arguments are 0-dimension arrays (i.e. single values) return a plain value (i.e.
+ // not wrapped in an array).
+ if (size == 1 && ndim == 0) {
+ PYBIND11_EXPAND_SIDE_EFFECTS(params[VIndex] = buffers[BIndex].ptr);
+ return cast(returned_array::call(f, *reinterpret_cast<param_n_t<Index> *>(params[Index])...));
+ }
+
+ auto result = returned_array::create(trivial, shape);
+
+ if (size == 0) return std::move(result);
+
+ /* Call the function */
+ auto mutable_data = returned_array::mutable_data(result);
+ if (trivial == broadcast_trivial::non_trivial)
+ apply_broadcast(buffers, params, mutable_data, size, shape, i_seq, vi_seq, bi_seq);
+ else
+ apply_trivial(buffers, params, mutable_data, size, i_seq, vi_seq, bi_seq);
+
+ return std::move(result);
+ }
+
+ template <size_t... Index, size_t... VIndex, size_t... BIndex>
+ void apply_trivial(std::array<buffer_info, NVectorized> &buffers,
+ std::array<void *, N> &params,
+ Return *out,
+ size_t size,
+ index_sequence<Index...>, index_sequence<VIndex...>, index_sequence<BIndex...>) {
+
+ // Initialize an array of mutable byte references and sizes with references set to the
+ // appropriate pointer in `params`; as we iterate, we'll increment each pointer by its size
+ // (except for singletons, which get an increment of 0).
+ std::array<std::pair<unsigned char *&, const size_t>, NVectorized> vecparams{{
+ std::pair<unsigned char *&, const size_t>(
+ reinterpret_cast<unsigned char *&>(params[VIndex] = buffers[BIndex].ptr),
+ buffers[BIndex].size == 1 ? 0 : sizeof(param_n_t<VIndex>)
+ )...
+ }};
+
+ for (size_t i = 0; i < size; ++i) {
+ returned_array::call(out, i, f, *reinterpret_cast<param_n_t<Index> *>(params[Index])...);
+ for (auto &x : vecparams) x.first += x.second;
+ }
+ }
+
+ template <size_t... Index, size_t... VIndex, size_t... BIndex>
+ void apply_broadcast(std::array<buffer_info, NVectorized> &buffers,
+ std::array<void *, N> &params,
+ Return *out,
+ size_t size,
+ const std::vector<ssize_t> &output_shape,
+ index_sequence<Index...>, index_sequence<VIndex...>, index_sequence<BIndex...>) {
+
+ multi_array_iterator<NVectorized> input_iter(buffers, output_shape);
+
+ for (size_t i = 0; i < size; ++i, ++input_iter) {
+ PYBIND11_EXPAND_SIDE_EFFECTS((
+ params[VIndex] = input_iter.template data<BIndex>()
+ ));
+ returned_array::call(out, i, f, *reinterpret_cast<param_n_t<Index> *>(std::get<Index>(params))...);
+ }
+ }
+};
+
+template <typename Func, typename Return, typename... Args>
+vectorize_helper<Func, Return, Args...>
+vectorize_extractor(const Func &f, Return (*) (Args ...)) {
+ return detail::vectorize_helper<Func, Return, Args...>(f);
+}
+
+template <typename T, int Flags> struct handle_type_name<array_t<T, Flags>> {
+ static constexpr auto name = _("numpy.ndarray[") + npy_format_descriptor<T>::name + _("]");
+};
+
+PYBIND11_NAMESPACE_END(detail)
+
+// Vanilla pointer vectorizer:
+template <typename Return, typename... Args>
+detail::vectorize_helper<Return (*)(Args...), Return, Args...>
+vectorize(Return (*f) (Args ...)) {
+ return detail::vectorize_helper<Return (*)(Args...), Return, Args...>(f);
+}
+
+// lambda vectorizer:
+template <typename Func, detail::enable_if_t<detail::is_lambda<Func>::value, int> = 0>
+auto vectorize(Func &&f) -> decltype(
+ detail::vectorize_extractor(std::forward<Func>(f), (detail::function_signature_t<Func> *) nullptr)) {
+ return detail::vectorize_extractor(std::forward<Func>(f), (detail::function_signature_t<Func> *) nullptr);
+}
+
+// Vectorize a class method (non-const):
+template <typename Return, typename Class, typename... Args,
+ typename Helper = detail::vectorize_helper<decltype(std::mem_fn(std::declval<Return (Class::*)(Args...)>())), Return, Class *, Args...>>
+Helper vectorize(Return (Class::*f)(Args...)) {
+ return Helper(std::mem_fn(f));
+}
+
+// Vectorize a class method (const):
+template <typename Return, typename Class, typename... Args,
+ typename Helper = detail::vectorize_helper<decltype(std::mem_fn(std::declval<Return (Class::*)(Args...) const>())), Return, const Class *, Args...>>
+Helper vectorize(Return (Class::*f)(Args...) const) {
+ return Helper(std::mem_fn(f));
+}
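+// Typical use of the overloads above (illustrative sketch; `m` is a module and
+// `my_func` a hypothetical free function):
+//
+//     double my_func(int i, float j);
+//     m.def("my_func", py::vectorize(my_func));
+//
+// The bound function then accepts NumPy arrays (or scalars) for the vectorizable
+// arguments and is applied element-wise over their broadcast shape.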
+
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
--- /dev/null
+/*
+ pybind11/operators.h: Metatemplates for operator overloading
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "pybind11.h"
+
+#if defined(__clang__) && !defined(__INTEL_COMPILER)
+# pragma clang diagnostic ignored "-Wunsequenced" // multiple unsequenced modifications to 'self' (when using def(py::self OP Type()))
+#elif defined(_MSC_VER)
+# pragma warning(push)
+# pragma warning(disable: 4127) // warning C4127: Conditional expression is constant
+#endif
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+/// Enumeration with all supported operator types
+enum op_id : int {
+ op_add, op_sub, op_mul, op_div, op_mod, op_divmod, op_pow, op_lshift,
+ op_rshift, op_and, op_xor, op_or, op_neg, op_pos, op_abs, op_invert,
+ op_int, op_long, op_float, op_str, op_cmp, op_gt, op_ge, op_lt, op_le,
+ op_eq, op_ne, op_iadd, op_isub, op_imul, op_idiv, op_imod, op_ilshift,
+ op_irshift, op_iand, op_ixor, op_ior, op_complex, op_bool, op_nonzero,
+ op_repr, op_truediv, op_itruediv, op_hash
+};
+
+enum op_type : int {
+ op_l, /* base type on left */
+ op_r, /* base type on right */
+ op_u /* unary operator */
+};
+
+struct self_t { };
+static const self_t self = self_t();
+
+/// Type for an unused type slot
+struct undefined_t { };
+
+/// Don't warn about an unused variable
+inline self_t __self() { return self; }
+
+/// base template of operator implementations
+template <op_id, op_type, typename B, typename L, typename R> struct op_impl { };
+
+/// Operator implementation generator
+template <op_id id, op_type ot, typename L, typename R> struct op_ {
+ template <typename Class, typename... Extra> void execute(Class &cl, const Extra&... extra) const {
+ using Base = typename Class::type;
+ using L_type = conditional_t<std::is_same<L, self_t>::value, Base, L>;
+ using R_type = conditional_t<std::is_same<R, self_t>::value, Base, R>;
+ using op = op_impl<id, ot, Base, L_type, R_type>;
+ cl.def(op::name(), &op::execute, is_operator(), extra...);
+ #if PY_MAJOR_VERSION < 3
+ if (id == op_truediv || id == op_itruediv)
+ cl.def(id == op_itruediv ? "__idiv__" : ot == op_l ? "__div__" : "__rdiv__",
+ &op::execute, is_operator(), extra...);
+ #endif
+ }
+ template <typename Class, typename... Extra> void execute_cast(Class &cl, const Extra&... extra) const {
+ using Base = typename Class::type;
+ using L_type = conditional_t<std::is_same<L, self_t>::value, Base, L>;
+ using R_type = conditional_t<std::is_same<R, self_t>::value, Base, R>;
+ using op = op_impl<id, ot, Base, L_type, R_type>;
+ cl.def(op::name(), &op::execute_cast, is_operator(), extra...);
+ #if PY_MAJOR_VERSION < 3
+ if (id == op_truediv || id == op_itruediv)
+ cl.def(id == op_itruediv ? "__idiv__" : ot == op_l ? "__div__" : "__rdiv__",
+ &op::execute, is_operator(), extra...);
+ #endif
+ }
+};
+
+#define PYBIND11_BINARY_OPERATOR(id, rid, op, expr) \
+template <typename B, typename L, typename R> struct op_impl<op_##id, op_l, B, L, R> { \
+ static char const* name() { return "__" #id "__"; } \
+ static auto execute(const L &l, const R &r) -> decltype(expr) { return (expr); } \
+ static B execute_cast(const L &l, const R &r) { return B(expr); } \
+}; \
+template <typename B, typename L, typename R> struct op_impl<op_##id, op_r, B, L, R> { \
+ static char const* name() { return "__" #rid "__"; } \
+ static auto execute(const R &r, const L &l) -> decltype(expr) { return (expr); } \
+ static B execute_cast(const R &r, const L &l) { return B(expr); } \
+}; \
+inline op_<op_##id, op_l, self_t, self_t> op(const self_t &, const self_t &) { \
+ return op_<op_##id, op_l, self_t, self_t>(); \
+} \
+template <typename T> op_<op_##id, op_l, self_t, T> op(const self_t &, const T &) { \
+ return op_<op_##id, op_l, self_t, T>(); \
+} \
+template <typename T> op_<op_##id, op_r, T, self_t> op(const T &, const self_t &) { \
+ return op_<op_##id, op_r, T, self_t>(); \
+}
+
+#define PYBIND11_INPLACE_OPERATOR(id, op, expr) \
+template <typename B, typename L, typename R> struct op_impl<op_##id, op_l, B, L, R> { \
+ static char const* name() { return "__" #id "__"; } \
+ static auto execute(L &l, const R &r) -> decltype(expr) { return expr; } \
+ static B execute_cast(L &l, const R &r) { return B(expr); } \
+}; \
+template <typename T> op_<op_##id, op_l, self_t, T> op(const self_t &, const T &) { \
+ return op_<op_##id, op_l, self_t, T>(); \
+}
+
+#define PYBIND11_UNARY_OPERATOR(id, op, expr) \
+template <typename B, typename L> struct op_impl<op_##id, op_u, B, L, undefined_t> { \
+ static char const* name() { return "__" #id "__"; } \
+ static auto execute(const L &l) -> decltype(expr) { return expr; } \
+ static B execute_cast(const L &l) { return B(expr); } \
+}; \
+inline op_<op_##id, op_u, self_t, undefined_t> op(const self_t &) { \
+ return op_<op_##id, op_u, self_t, undefined_t>(); \
+}
+
+PYBIND11_BINARY_OPERATOR(sub, rsub, operator-, l - r)
+PYBIND11_BINARY_OPERATOR(add, radd, operator+, l + r)
+PYBIND11_BINARY_OPERATOR(mul, rmul, operator*, l * r)
+PYBIND11_BINARY_OPERATOR(truediv, rtruediv, operator/, l / r)
+PYBIND11_BINARY_OPERATOR(mod, rmod, operator%, l % r)
+PYBIND11_BINARY_OPERATOR(lshift, rlshift, operator<<, l << r)
+PYBIND11_BINARY_OPERATOR(rshift, rrshift, operator>>, l >> r)
+PYBIND11_BINARY_OPERATOR(and, rand, operator&, l & r)
+PYBIND11_BINARY_OPERATOR(xor, rxor, operator^, l ^ r)
+PYBIND11_BINARY_OPERATOR(eq, eq, operator==, l == r)
+PYBIND11_BINARY_OPERATOR(ne, ne, operator!=, l != r)
+PYBIND11_BINARY_OPERATOR(or, ror, operator|, l | r)
+PYBIND11_BINARY_OPERATOR(gt, lt, operator>, l > r)
+PYBIND11_BINARY_OPERATOR(ge, le, operator>=, l >= r)
+PYBIND11_BINARY_OPERATOR(lt, gt, operator<, l < r)
+PYBIND11_BINARY_OPERATOR(le, ge, operator<=, l <= r)
+//PYBIND11_BINARY_OPERATOR(pow, rpow, pow, std::pow(l, r))
+PYBIND11_INPLACE_OPERATOR(iadd, operator+=, l += r)
+PYBIND11_INPLACE_OPERATOR(isub, operator-=, l -= r)
+PYBIND11_INPLACE_OPERATOR(imul, operator*=, l *= r)
+PYBIND11_INPLACE_OPERATOR(itruediv, operator/=, l /= r)
+PYBIND11_INPLACE_OPERATOR(imod, operator%=, l %= r)
+PYBIND11_INPLACE_OPERATOR(ilshift, operator<<=, l <<= r)
+PYBIND11_INPLACE_OPERATOR(irshift, operator>>=, l >>= r)
+PYBIND11_INPLACE_OPERATOR(iand, operator&=, l &= r)
+PYBIND11_INPLACE_OPERATOR(ixor, operator^=, l ^= r)
+PYBIND11_INPLACE_OPERATOR(ior, operator|=, l |= r)
+PYBIND11_UNARY_OPERATOR(neg, operator-, -l)
+PYBIND11_UNARY_OPERATOR(pos, operator+, +l)
+// WARNING: This usage of `abs` should only be done for existing STL overloads.
+// Adding overloads directly into the `std::` namespace is advised against:
+// https://en.cppreference.com/w/cpp/language/extending_std
+PYBIND11_UNARY_OPERATOR(abs, abs, std::abs(l))
+PYBIND11_UNARY_OPERATOR(hash, hash, std::hash<L>()(l))
+PYBIND11_UNARY_OPERATOR(invert, operator~, (~l))
+PYBIND11_UNARY_OPERATOR(bool, operator!, !!l)
+PYBIND11_UNARY_OPERATOR(int, int_, (int) l)
+PYBIND11_UNARY_OPERATOR(float, float_, (double) l)
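+// Usage sketch for the operator helpers above (illustrative; `Vector2` is a
+// hypothetical C++ type providing the corresponding operators):
+//
+//     py::class_<Vector2>(m, "Vector2")
+//         .def(py::self + py::self)
+//         .def(py::self += py::self)
+//         .def(py::self *= float())
+//         .def(-py::self)
+//         .def(hash(py::self));
+//
+// Each .def() call picks the matching op_impl specialization generated above.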
+
+#undef PYBIND11_BINARY_OPERATOR
+#undef PYBIND11_INPLACE_OPERATOR
+#undef PYBIND11_UNARY_OPERATOR
+PYBIND11_NAMESPACE_END(detail)
+
+using detail::self;
+// Add named operators so that they are accessible via `py::`.
+using detail::hash;
+
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
+
+#if defined(_MSC_VER)
+# pragma warning(pop)
+#endif
--- /dev/null
+/*
+ pybind11/options.h: global settings that are configurable at runtime.
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "detail/common.h"
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+
+class options {
+public:
+
+ // Default RAII constructor, which leaves settings as they currently are.
+ options() : previous_state(global_state()) {}
+
+ // Class is non-copyable.
+ options(const options&) = delete;
+ options& operator=(const options&) = delete;
+
+ // Destructor, which restores settings that were in effect before.
+ ~options() {
+ global_state() = previous_state;
+ }
+
+ // Setter methods (affect the global state):
+
+ options& disable_user_defined_docstrings() & { global_state().show_user_defined_docstrings = false; return *this; }
+
+ options& enable_user_defined_docstrings() & { global_state().show_user_defined_docstrings = true; return *this; }
+
+ options& disable_function_signatures() & { global_state().show_function_signatures = false; return *this; }
+
+ options& enable_function_signatures() & { global_state().show_function_signatures = true; return *this; }
+
+ // Getter methods (return the global state):
+
+ static bool show_user_defined_docstrings() { return global_state().show_user_defined_docstrings; }
+
+ static bool show_function_signatures() { return global_state().show_function_signatures; }
+
+ // This type is not meant to be allocated on the heap.
+ void* operator new(size_t) = delete;
+
+private:
+
+ struct state {
+ bool show_user_defined_docstrings = true; //< Include user-supplied texts in docstrings.
+ bool show_function_signatures = true; //< Include auto-generated function signatures in docstrings.
+ };
+
+ static state &global_state() {
+ static state instance;
+ return instance;
+ }
+
+ state previous_state;
+};
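+// Usage sketch (illustrative): options acts as a scoped RAII guard inside a module
+// init function, restoring the previous global settings on destruction:
+//
+//     py::options options;
+//     options.disable_function_signatures();
+//     m.def("add", [](int a, int b) { return a + b; }, "Adds two numbers");
+//     // previous settings are restored when `options` goes out of scope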
+
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+/*
+ pybind11/pybind11.h: Main header file of the C++11 Python
+ binding generator library
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#if defined(__INTEL_COMPILER)
+# pragma warning push
+# pragma warning disable 68 // integer conversion resulted in a change of sign
+# pragma warning disable 186 // pointless comparison of unsigned integer with zero
+# pragma warning disable 878 // incompatible exception specifications
+# pragma warning disable 1334 // the "template" keyword used for syntactic disambiguation may only be used within a template
+# pragma warning disable 1682 // implicit conversion of a 64-bit integral type to a smaller integral type (potential portability problem)
+# pragma warning disable 1786 // function "strdup" was declared deprecated
+# pragma warning disable 1875 // offsetof applied to non-POD (Plain Old Data) types is nonstandard
+# pragma warning disable 2196 // warning #2196: routine is both "inline" and "noinline"
+#elif defined(_MSC_VER)
+# pragma warning(push)
+# pragma warning(disable: 4100) // warning C4100: Unreferenced formal parameter
+# pragma warning(disable: 4127) // warning C4127: Conditional expression is constant
+# pragma warning(disable: 4512) // warning C4512: Assignment operator was implicitly defined as deleted
+# pragma warning(disable: 4800) // warning C4800: 'int': forcing value to bool 'true' or 'false' (performance warning)
+# pragma warning(disable: 4996) // warning C4996: The POSIX name for this item is deprecated. Instead, use the ISO C and C++ conformant name
+# pragma warning(disable: 4702) // warning C4702: unreachable code
+# pragma warning(disable: 4522) // warning C4522: multiple assignment operators specified
+# pragma warning(disable: 4505) // warning C4505: 'PySlice_GetIndicesEx': unreferenced local function has been removed (PyPy only)
+#elif defined(__GNUG__) && !defined(__clang__)
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wunused-but-set-parameter"
+# pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+# pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+# pragma GCC diagnostic ignored "-Wstrict-aliasing"
+# pragma GCC diagnostic ignored "-Wattributes"
+# if __GNUC__ >= 7
+# pragma GCC diagnostic ignored "-Wnoexcept-type"
+# endif
+#endif
+
+#include "attr.h"
+#include "options.h"
+#include "detail/class.h"
+#include "detail/init.h"
+
+#include <memory>
+#include <vector>
+#include <string>
+#include <utility>
+
+#if defined(__GNUG__) && !defined(__clang__)
+# include <cxxabi.h>
+#endif
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+
+/// Wraps an arbitrary C++ function/method/lambda function etc. into a callable Python object
+class cpp_function : public function {
+public:
+ cpp_function() = default;
+ cpp_function(std::nullptr_t) { }
+
+ /// Construct a cpp_function from a vanilla function pointer
+ template <typename Return, typename... Args, typename... Extra>
+ cpp_function(Return (*f)(Args...), const Extra&... extra) {
+ initialize(f, f, extra...);
+ }
+
+ /// Construct a cpp_function from a lambda function (possibly with internal state)
+ template <typename Func, typename... Extra,
+ typename = detail::enable_if_t<detail::is_lambda<Func>::value>>
+ cpp_function(Func &&f, const Extra&... extra) {
+ initialize(std::forward<Func>(f),
+ (detail::function_signature_t<Func> *) nullptr, extra...);
+ }
+
+ /// Construct a cpp_function from a class method (non-const, no ref-qualifier)
+ template <typename Return, typename Class, typename... Arg, typename... Extra>
+ cpp_function(Return (Class::*f)(Arg...), const Extra&... extra) {
+ initialize([f](Class *c, Arg... args) -> Return { return (c->*f)(std::forward<Arg>(args)...); },
+ (Return (*) (Class *, Arg...)) nullptr, extra...);
+ }
+
+ /// Construct a cpp_function from a class method (non-const, lvalue ref-qualifier)
+ /// A copy of the overload for non-const functions without explicit ref-qualifier
+ /// but with an added `&`.
+ template <typename Return, typename Class, typename... Arg, typename... Extra>
+ cpp_function(Return (Class::*f)(Arg...)&, const Extra&... extra) {
+ initialize([f](Class *c, Arg... args) -> Return { return (c->*f)(args...); },
+ (Return (*) (Class *, Arg...)) nullptr, extra...);
+ }
+
+ /// Construct a cpp_function from a class method (const, no ref-qualifier)
+ template <typename Return, typename Class, typename... Arg, typename... Extra>
+ cpp_function(Return (Class::*f)(Arg...) const, const Extra&... extra) {
+ initialize([f](const Class *c, Arg... args) -> Return { return (c->*f)(std::forward<Arg>(args)...); },
+ (Return (*)(const Class *, Arg ...)) nullptr, extra...);
+ }
+
+ /// Construct a cpp_function from a class method (const, lvalue ref-qualifier)
+ /// A copy of the overload for const functions without explicit ref-qualifier
+ /// but with an added `&`.
+ template <typename Return, typename Class, typename... Arg, typename... Extra>
+ cpp_function(Return (Class::*f)(Arg...) const&, const Extra&... extra) {
+ initialize([f](const Class *c, Arg... args) -> Return { return (c->*f)(args...); },
+ (Return (*)(const Class *, Arg ...)) nullptr, extra...);
+ }
+
+ /// Return the function name
+ object name() const { return attr("__name__"); }
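+ // Illustrative note: cpp_function is usually created indirectly via module_::def /
+ // class_::def, but it can also be used directly, e.g. (with `offset` a hypothetical
+ // captured variable):
+ //
+ //     py::cpp_function f([offset](int i) { return i + offset; }, py::arg("i"));
+ //
+ // which yields a callable Python object wrapping the stateful lambda.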
+
+protected:
+ /// Space optimization: don't inline this frequently instantiated fragment
+ PYBIND11_NOINLINE detail::function_record *make_function_record() {
+ return new detail::function_record();
+ }
+
+ /// Special internal constructor for functors, lambda functions, etc.
+ template <typename Func, typename Return, typename... Args, typename... Extra>
+ void initialize(Func &&f, Return (*)(Args...), const Extra&... extra) {
+ using namespace detail;
+ struct capture { remove_reference_t<Func> f; };
+
+ /* Store the function including any extra state it might have (e.g. a lambda capture object) */
+ auto rec = make_function_record();
+
+ /* Store the capture object directly in the function record if there is enough space */
+ if (sizeof(capture) <= sizeof(rec->data)) {
+ /* Without these pragmas, GCC warns that there might not be
+ enough space to use the placement new operator. However, the
+ 'if' statement above ensures that this is the case. */
+#if defined(__GNUG__) && !defined(__clang__) && __GNUC__ >= 6
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wplacement-new"
+#endif
+ new ((capture *) &rec->data) capture { std::forward<Func>(f) };
+#if defined(__GNUG__) && !defined(__clang__) && __GNUC__ >= 6
+# pragma GCC diagnostic pop
+#endif
+ if (!std::is_trivially_destructible<Func>::value)
+ rec->free_data = [](function_record *r) { ((capture *) &r->data)->~capture(); };
+ } else {
+ rec->data[0] = new capture { std::forward<Func>(f) };
+ rec->free_data = [](function_record *r) { delete ((capture *) r->data[0]); };
+ }
+
+ /* Type casters for the function arguments and return value */
+ using cast_in = argument_loader<Args...>;
+ using cast_out = make_caster<
+ conditional_t<std::is_void<Return>::value, void_type, Return>
+ >;
+
+ static_assert(expected_num_args<Extra...>(sizeof...(Args), cast_in::has_args, cast_in::has_kwargs),
+ "The number of argument annotations does not match the number of function arguments");
+
+ /* Dispatch code which converts function arguments and performs the actual function call */
+ rec->impl = [](function_call &call) -> handle {
+ cast_in args_converter;
+
+ /* Try to cast the function arguments into the C++ domain */
+ if (!args_converter.load_args(call))
+ return PYBIND11_TRY_NEXT_OVERLOAD;
+
+ /* Invoke call policy pre-call hook */
+ process_attributes<Extra...>::precall(call);
+
+ /* Get a pointer to the capture object */
+ auto data = (sizeof(capture) <= sizeof(call.func.data)
+ ? &call.func.data : call.func.data[0]);
+ auto *cap = const_cast<capture *>(reinterpret_cast<const capture *>(data));
+
+ /* Override policy for rvalues -- usually to enforce rvp::move on an rvalue */
+ return_value_policy policy = return_value_policy_override<Return>::policy(call.func.policy);
+
+ /* Function scope guard -- defaults to the compile-to-nothing `void_type` */
+ using Guard = extract_guard_t<Extra...>;
+
+ /* Perform the function call */
+ handle result = cast_out::cast(
+ std::move(args_converter).template call<Return, Guard>(cap->f), policy, call.parent);
+
+ /* Invoke call policy post-call hook */
+ process_attributes<Extra...>::postcall(call, result);
+
+ return result;
+ };
+
+ /* Process any user-provided function attributes */
+ process_attributes<Extra...>::init(extra..., rec);
+
+ {
+ constexpr bool has_kw_only_args = any_of<std::is_same<kw_only, Extra>...>::value,
+ has_pos_only_args = any_of<std::is_same<pos_only, Extra>...>::value,
+ has_args = any_of<std::is_same<args, Args>...>::value,
+ has_arg_annotations = any_of<is_keyword<Extra>...>::value;
+ static_assert(has_arg_annotations || !has_kw_only_args, "py::kw_only requires the use of argument annotations");
+ static_assert(has_arg_annotations || !has_pos_only_args, "py::pos_only requires the use of argument annotations (for docstrings and aligning the annotations to the argument)");
+ static_assert(!(has_args && has_kw_only_args), "py::kw_only cannot be combined with a py::args argument");
+ }
+
+ /* Generate a readable signature describing the function's arguments and return value types */
+ static constexpr auto signature = _("(") + cast_in::arg_names + _(") -> ") + cast_out::name;
+ PYBIND11_DESCR_CONSTEXPR auto types = decltype(signature)::types();
+
+ /* Register the function with Python from generic (non-templated) code */
+ initialize_generic(rec, signature.text, types.data(), sizeof...(Args));
+
+ if (cast_in::has_args) rec->has_args = true;
+ if (cast_in::has_kwargs) rec->has_kwargs = true;
+
+ /* Stash some additional information used by an important optimization in 'functional.h' */
+ using FunctionType = Return (*)(Args...);
+ constexpr bool is_function_ptr =
+ std::is_convertible<Func, FunctionType>::value &&
+ sizeof(capture) == sizeof(void *);
+ if (is_function_ptr) {
+ rec->is_stateless = true;
+ rec->data[1] = const_cast<void *>(reinterpret_cast<const void *>(&typeid(FunctionType)));
+ }
+ }
+
+ /// Register a function call with Python (generic non-templated code goes here)
+ void initialize_generic(detail::function_record *rec, const char *text,
+ const std::type_info *const *types, size_t args) {
+
+ /* Create copies of all referenced C-style strings */
+ rec->name = strdup(rec->name ? rec->name : "");
+ if (rec->doc) rec->doc = strdup(rec->doc);
+ for (auto &a: rec->args) {
+ if (a.name)
+ a.name = strdup(a.name);
+ if (a.descr)
+ a.descr = strdup(a.descr);
+ else if (a.value)
+ a.descr = strdup(repr(a.value).cast<std::string>().c_str());
+ }
+
+ rec->is_constructor = !strcmp(rec->name, "__init__") || !strcmp(rec->name, "__setstate__");
+
+#if !defined(NDEBUG) && !defined(PYBIND11_DISABLE_NEW_STYLE_INIT_WARNING)
+ if (rec->is_constructor && !rec->is_new_style_constructor) {
+ const auto class_name = detail::get_fully_qualified_tp_name((PyTypeObject *) rec->scope.ptr());
+ const auto func_name = std::string(rec->name);
+ PyErr_WarnEx(
+ PyExc_FutureWarning,
+ ("pybind11-bound class '" + class_name + "' is using an old-style "
+ "placement-new '" + func_name + "' which has been deprecated. See "
+ "the upgrade guide in pybind11's docs. This message is only visible "
+ "when compiled in debug mode.").c_str(), 0
+ );
+ }
+#endif
+
+ /* Generate a proper function signature */
+ std::string signature;
+ size_t type_index = 0, arg_index = 0;
+ for (auto *pc = text; *pc != '\0'; ++pc) {
+ const auto c = *pc;
+
+ if (c == '{') {
+ // Write arg name for everything except *args and **kwargs.
+ if (*(pc + 1) == '*')
+ continue;
+ // Separator for keyword-only arguments, placed before the kw
+ // arguments start
+ if (rec->nargs_kw_only > 0 && arg_index + rec->nargs_kw_only == args)
+ signature += "*, ";
+ if (arg_index < rec->args.size() && rec->args[arg_index].name) {
+ signature += rec->args[arg_index].name;
+ } else if (arg_index == 0 && rec->is_method) {
+ signature += "self";
+ } else {
+ signature += "arg" + std::to_string(arg_index - (rec->is_method ? 1 : 0));
+ }
+ signature += ": ";
+ } else if (c == '}') {
+ // Write default value if available.
+ if (arg_index < rec->args.size() && rec->args[arg_index].descr) {
+ signature += " = ";
+ signature += rec->args[arg_index].descr;
+ }
+ // Separator for positional-only arguments (placed after the
+ // argument, rather than before like the `*` used for keyword-only arguments)
+ if (rec->nargs_pos_only > 0 && (arg_index + 1) == rec->nargs_pos_only)
+ signature += ", /";
+ arg_index++;
+ } else if (c == '%') {
+ const std::type_info *t = types[type_index++];
+ if (!t)
+ pybind11_fail("Internal error while parsing type signature (1)");
+ if (auto tinfo = detail::get_type_info(*t)) {
+ handle th((PyObject *) tinfo->type);
+ signature +=
+ th.attr("__module__").cast<std::string>() + "." +
+ th.attr("__qualname__").cast<std::string>(); // Python 3.3+, but we backport it to earlier versions
+ } else if (rec->is_new_style_constructor && arg_index == 0) {
+ // A new-style `__init__` takes `self` as `value_and_holder`.
+ // Rewrite it to the proper class type.
+ signature +=
+ rec->scope.attr("__module__").cast<std::string>() + "." +
+ rec->scope.attr("__qualname__").cast<std::string>();
+ } else {
+ std::string tname(t->name());
+ detail::clean_type_id(tname);
+ signature += tname;
+ }
+ } else {
+ signature += c;
+ }
+ }
+
+ if (arg_index != args || types[type_index] != nullptr)
+ pybind11_fail("Internal error while parsing type signature (2)");
+
+#if PY_MAJOR_VERSION < 3
+ if (strcmp(rec->name, "__next__") == 0) {
+ std::free(rec->name);
+ rec->name = strdup("next");
+ } else if (strcmp(rec->name, "__bool__") == 0) {
+ std::free(rec->name);
+ rec->name = strdup("__nonzero__");
+ }
+#endif
+ rec->signature = strdup(signature.c_str());
+ rec->args.shrink_to_fit();
+ rec->nargs = (std::uint16_t) args;
+
+ if (rec->sibling && PYBIND11_INSTANCE_METHOD_CHECK(rec->sibling.ptr()))
+ rec->sibling = PYBIND11_INSTANCE_METHOD_GET_FUNCTION(rec->sibling.ptr());
+
+ detail::function_record *chain = nullptr, *chain_start = rec;
+ if (rec->sibling) {
+ if (PyCFunction_Check(rec->sibling.ptr())) {
+ auto rec_capsule = reinterpret_borrow<capsule>(PyCFunction_GET_SELF(rec->sibling.ptr()));
+ chain = (detail::function_record *) rec_capsule;
+ /* Never append a method to an overload chain of a parent class;
+ instead, hide the parent's overloads in this case */
+ if (!chain->scope.is(rec->scope))
+ chain = nullptr;
+ }
+ // Don't trigger for things like the default __init__, which are wrapper_descriptors that we are intentionally replacing
+ else if (!rec->sibling.is_none() && rec->name[0] != '_')
+ pybind11_fail("Cannot overload existing non-function object \"" + std::string(rec->name) +
+ "\" with a function of the same name");
+ }
+
+ if (!chain) {
+ /* No existing overload was found, create a new function object */
+ rec->def = new PyMethodDef();
+ std::memset(rec->def, 0, sizeof(PyMethodDef));
+ rec->def->ml_name = rec->name;
+ rec->def->ml_meth = reinterpret_cast<PyCFunction>(reinterpret_cast<void (*) (void)>(*dispatcher));
+ rec->def->ml_flags = METH_VARARGS | METH_KEYWORDS;
+
+ capsule rec_capsule(rec, [](void *ptr) {
+ destruct((detail::function_record *) ptr);
+ });
+
+ object scope_module;
+ if (rec->scope) {
+ if (hasattr(rec->scope, "__module__")) {
+ scope_module = rec->scope.attr("__module__");
+ } else if (hasattr(rec->scope, "__name__")) {
+ scope_module = rec->scope.attr("__name__");
+ }
+ }
+
+ m_ptr = PyCFunction_NewEx(rec->def, rec_capsule.ptr(), scope_module.ptr());
+ if (!m_ptr)
+ pybind11_fail("cpp_function::cpp_function(): Could not allocate function object");
+ } else {
+ /* Append at the beginning or end of the overload chain */
+ m_ptr = rec->sibling.ptr();
+ inc_ref();
+ if (chain->is_method != rec->is_method)
+ pybind11_fail("overloading a method with both static and instance methods is not supported; "
+ #if defined(NDEBUG)
+ "compile in debug mode for more details"
+ #else
+ "error while attempting to bind " + std::string(rec->is_method ? "instance" : "static") + " method " +
+ std::string(pybind11::str(rec->scope.attr("__name__"))) + "." + std::string(rec->name) + signature
+ #endif
+ );
+
+ if (rec->prepend) {
+ // Beginning of chain; we need to replace the capsule's current head-of-the-chain
+ // pointer with this one, then make this one point to the previous head of the
+ // chain.
+ chain_start = rec;
+ rec->next = chain;
+ auto rec_capsule = reinterpret_borrow<capsule>(((PyCFunctionObject *) m_ptr)->m_self);
+ rec_capsule.set_pointer(rec);
+ } else {
+ // Or end of chain (normal behavior)
+ chain_start = chain;
+ while (chain->next)
+ chain = chain->next;
+ chain->next = rec;
+ }
+ }
+
+ std::string signatures;
+ int index = 0;
+ /* Create a nice pydoc rec including all signatures and
+ docstrings of the functions in the overload chain */
+ if (chain && options::show_function_signatures()) {
+ // First a generic signature
+ signatures += rec->name;
+ signatures += "(*args, **kwargs)\n";
+ signatures += "Overloaded function.\n\n";
+ }
+ // Then specific overload signatures
+ bool first_user_def = true;
+ for (auto it = chain_start; it != nullptr; it = it->next) {
+ if (options::show_function_signatures()) {
+ if (index > 0) signatures += "\n";
+ if (chain)
+ signatures += std::to_string(++index) + ". ";
+ signatures += rec->name;
+ signatures += it->signature;
+ signatures += "\n";
+ }
+ if (it->doc && strlen(it->doc) > 0 && options::show_user_defined_docstrings()) {
+ // If we're appending another docstring, and aren't printing function signatures, we
+ // need to append a newline first:
+ if (!options::show_function_signatures()) {
+ if (first_user_def) first_user_def = false;
+ else signatures += "\n";
+ }
+ if (options::show_function_signatures()) signatures += "\n";
+ signatures += it->doc;
+ if (options::show_function_signatures()) signatures += "\n";
+ }
+ }
+
+ /* Install docstring */
+ auto *func = (PyCFunctionObject *) m_ptr;
+ if (func->m_ml->ml_doc)
+ std::free(const_cast<char *>(func->m_ml->ml_doc));
+ func->m_ml->ml_doc = strdup(signatures.c_str());
+
+ if (rec->is_method) {
+ m_ptr = PYBIND11_INSTANCE_METHOD_NEW(m_ptr, rec->scope.ptr());
+ if (!m_ptr)
+ pybind11_fail("cpp_function::cpp_function(): Could not allocate instance method object");
+ Py_DECREF(func);
+ }
+ }
+
+ /// When a cpp_function is GCed, release any memory allocated by pybind11
+ static void destruct(detail::function_record *rec) {
+ // If on Python 3.9, check the interpreter "MICRO" (patch) version.
+ // If this is running on 3.9.0, we have to work around a bug.
+ #if !defined(PYPY_VERSION) && PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 9
+ static bool is_zero = Py_GetVersion()[4] == '0';
+ #endif
+
+ while (rec) {
+ detail::function_record *next = rec->next;
+ if (rec->free_data)
+ rec->free_data(rec);
+ std::free((char *) rec->name);
+ std::free((char *) rec->doc);
+ std::free((char *) rec->signature);
+ for (auto &arg: rec->args) {
+ std::free(const_cast<char *>(arg.name));
+ std::free(const_cast<char *>(arg.descr));
+ arg.value.dec_ref();
+ }
+ if (rec->def) {
+ std::free(const_cast<char *>(rec->def->ml_doc));
+ // Python 3.9.0 decref's rec->def in the wrong order; if loaded on 3.9.0,
+ // let it leak (use Python 3.9.1 at runtime to fix)
+ // See https://github.com/python/cpython/pull/22670
+ #if !defined(PYPY_VERSION) && PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 9
+ if (!is_zero)
+ delete rec->def;
+ #else
+ delete rec->def;
+ #endif
+ }
+ delete rec;
+ rec = next;
+ }
+ }
+
+ /// Main dispatch logic for calls to functions bound using pybind11
+ static PyObject *dispatcher(PyObject *self, PyObject *args_in, PyObject *kwargs_in) {
+ using namespace detail;
+
+ /* Iterator over the list of potentially admissible overloads */
+ const function_record *overloads = (function_record *) PyCapsule_GetPointer(self, nullptr),
+ *it = overloads;
+
+ /* Need to know how many arguments + keyword arguments there are to pick the right overload */
+ const auto n_args_in = (size_t) PyTuple_GET_SIZE(args_in);
+
+ handle parent = n_args_in > 0 ? PyTuple_GET_ITEM(args_in, 0) : nullptr,
+ result = PYBIND11_TRY_NEXT_OVERLOAD;
+
+ auto self_value_and_holder = value_and_holder();
+ if (overloads->is_constructor) {
+ const auto tinfo = get_type_info((PyTypeObject *) overloads->scope.ptr());
+ const auto pi = reinterpret_cast<instance *>(parent.ptr());
+ self_value_and_holder = pi->get_value_and_holder(tinfo, false);
+
+ if (!self_value_and_holder.type || !self_value_and_holder.inst) {
+ PyErr_SetString(PyExc_TypeError, "__init__(self, ...) called with invalid `self` argument");
+ return nullptr;
+ }
+
+ // If this value is already registered it must mean __init__ is invoked multiple times;
+ // we really can't support that in C++, so just ignore the second __init__.
+ if (self_value_and_holder.instance_registered())
+ return none().release().ptr();
+ }
+
+ try {
+ // We do this in two passes: in the first pass, we load arguments with `convert=false`;
+ // in the second, we allow conversion (except for arguments with an explicit
+ // py::arg().noconvert()). This lets us prefer calls without conversion, with
+ // conversion as a fallback.
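+ // For example, with overloads f(double) and f(int) registered in that order, a call
+ // with a Python int is rejected by f(double) in the no-convert pass and binds to
+ // f(int); implicit conversions are only attempted in the second pass.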
+ std::vector<function_call> second_pass;
+
+ // However, if there are no overloads, we can just skip the no-convert pass entirely
+ const bool overloaded = it != nullptr && it->next != nullptr;
+
+ for (; it != nullptr; it = it->next) {
+
+ /* For each overload:
+ 1. Copy all positional arguments we were given, also checking to make sure that
+ named positional arguments weren't *also* specified via kwarg.
+ 2. If we weren't given enough, try to make up the omitted ones by checking
+ whether they were provided by a kwarg matching the `py::arg("name")` name. If
+ so, use it (and remove it from kwargs); if not, see if the function binding
+ provided a default that we can use.
+ 3. Ensure that either all keyword arguments were "consumed", or that the function
+ takes a kwargs argument to accept unconsumed kwargs.
+ 4. Any positional arguments still left get put into a tuple (for args), and any
+ leftover kwargs get put into a dict.
+ 5. Pack everything into a vector; if we have py::args or py::kwargs, they are an
+ extra tuple or dict at the end of the positional arguments.
+ 6. Call the function call dispatcher (function_record::impl)
+
+ If any of these fails, move on to the next overload and keep trying until we get a
+ result other than PYBIND11_TRY_NEXT_OVERLOAD.
+ */
+
+ const function_record &func = *it;
+ size_t num_args = func.nargs; // Number of positional arguments that we need
+ if (func.has_args) --num_args; // (but don't count py::args
+ if (func.has_kwargs) --num_args; // or py::kwargs)
+ size_t pos_args = num_args - func.nargs_kw_only;
+
+ if (!func.has_args && n_args_in > pos_args)
+ continue; // Too many positional arguments for this overload
+
+ if (n_args_in < pos_args && func.args.size() < pos_args)
+ continue; // Not enough positional arguments given, and not enough defaults to fill in the blanks
+
+ function_call call(func, parent);
+
+ size_t args_to_copy = (std::min)(pos_args, n_args_in); // Protect std::min with parentheses
+ size_t args_copied = 0;
+
+ // 0. Inject new-style `self` argument
+ if (func.is_new_style_constructor) {
+ // The `value` may have been preallocated by an old-style `__init__`
+ // if it was a preceding candidate for overload resolution.
+ if (self_value_and_holder)
+ self_value_and_holder.type->dealloc(self_value_and_holder);
+
+ call.init_self = PyTuple_GET_ITEM(args_in, 0);
+ call.args.emplace_back(reinterpret_cast<PyObject *>(&self_value_and_holder));
+ call.args_convert.push_back(false);
+ ++args_copied;
+ }
+
+ // 1. Copy any positional arguments given.
+ bool bad_arg = false;
+ for (; args_copied < args_to_copy; ++args_copied) {
+ const argument_record *arg_rec = args_copied < func.args.size() ? &func.args[args_copied] : nullptr;
+ if (kwargs_in && arg_rec && arg_rec->name && PyDict_GetItemString(kwargs_in, arg_rec->name)) {
+ bad_arg = true;
+ break;
+ }
+
+ handle arg(PyTuple_GET_ITEM(args_in, args_copied));
+ if (arg_rec && !arg_rec->none && arg.is_none()) {
+ bad_arg = true;
+ break;
+ }
+ call.args.push_back(arg);
+ call.args_convert.push_back(arg_rec ? arg_rec->convert : true);
+ }
+ if (bad_arg)
+ continue; // Maybe it was meant for another overload (issue #688)
+
+ // We'll need to copy this if we steal some kwargs for defaults
+ dict kwargs = reinterpret_borrow<dict>(kwargs_in);
+
+ // 1.5. Fill in any missing pos_only args from defaults if they exist
+ if (args_copied < func.nargs_pos_only) {
+ for (; args_copied < func.nargs_pos_only; ++args_copied) {
+ const auto &arg_rec = func.args[args_copied];
+ handle value;
+
+ if (arg_rec.value) {
+ value = arg_rec.value;
+ }
+ if (value) {
+ call.args.push_back(value);
+ call.args_convert.push_back(arg_rec.convert);
+ } else
+ break;
+ }
+
+ if (args_copied < func.nargs_pos_only)
+ continue; // Not enough defaults to fill the positional arguments
+ }
+
+ // 2. Check kwargs and, failing that, defaults that may help complete the list
+ if (args_copied < num_args) {
+ bool copied_kwargs = false;
+
+ for (; args_copied < num_args; ++args_copied) {
+ const auto &arg_rec = func.args[args_copied];
+
+ handle value;
+ if (kwargs_in && arg_rec.name)
+ value = PyDict_GetItemString(kwargs.ptr(), arg_rec.name);
+
+ if (value) {
+ // Consume a kwargs value
+ if (!copied_kwargs) {
+ kwargs = reinterpret_steal<dict>(PyDict_Copy(kwargs.ptr()));
+ copied_kwargs = true;
+ }
+ PyDict_DelItemString(kwargs.ptr(), arg_rec.name);
+ } else if (arg_rec.value) {
+ value = arg_rec.value;
+ }
+
+ if (!arg_rec.none && value.is_none()) {
+ break;
+ }
+
+ if (value) {
+ call.args.push_back(value);
+ call.args_convert.push_back(arg_rec.convert);
+ }
+ else
+ break;
+ }
+
+ if (args_copied < num_args)
+ continue; // Not enough arguments, defaults, or kwargs to fill the positional arguments
+ }
+
+ // 3. Check everything was consumed (unless we have a kwargs arg)
+ if (kwargs && !kwargs.empty() && !func.has_kwargs)
+ continue; // Unconsumed kwargs, but no py::kwargs argument to accept them
+
+ // 4a. If we have a py::args argument, create a new tuple with leftovers
+ if (func.has_args) {
+ tuple extra_args;
+ if (args_to_copy == 0) {
+ // We didn't copy out any positional arguments from the args_in tuple, so we
+ // can reuse it directly without copying:
+ extra_args = reinterpret_borrow<tuple>(args_in);
+ } else if (args_copied >= n_args_in) {
+ extra_args = tuple(0);
+ } else {
+ size_t args_size = n_args_in - args_copied;
+ extra_args = tuple(args_size);
+ for (size_t i = 0; i < args_size; ++i) {
+ extra_args[i] = PyTuple_GET_ITEM(args_in, args_copied + i);
+ }
+ }
+ call.args.push_back(extra_args);
+ call.args_convert.push_back(false);
+ call.args_ref = std::move(extra_args);
+ }
+
+ // 4b. If we have a py::kwargs, pass on any remaining kwargs
+ if (func.has_kwargs) {
+ if (!kwargs.ptr())
+ kwargs = dict(); // If we didn't get one, send an empty one
+ call.args.push_back(kwargs);
+ call.args_convert.push_back(false);
+ call.kwargs_ref = std::move(kwargs);
+ }
+
+ // 5. Put everything in a vector. Not technically step 5, we've been building it
+ // in `call.args` all along.
+ #if !defined(NDEBUG)
+ if (call.args.size() != func.nargs || call.args_convert.size() != func.nargs)
+ pybind11_fail("Internal error: function call dispatcher inserted wrong number of arguments!");
+ #endif
+
+ std::vector<bool> second_pass_convert;
+ if (overloaded) {
+ // We're in the first no-convert pass, so swap out the conversion flags for a
+ // set of all-false flags. If the call fails, we'll swap the flags back in for
+ // the conversion-allowed call below.
+ second_pass_convert.resize(func.nargs, false);
+ call.args_convert.swap(second_pass_convert);
+ }
+
+ // 6. Call the function.
+ try {
+ loader_life_support guard{};
+ result = func.impl(call);
+ } catch (reference_cast_error &) {
+ result = PYBIND11_TRY_NEXT_OVERLOAD;
+ }
+
+ if (result.ptr() != PYBIND11_TRY_NEXT_OVERLOAD)
+ break;
+
+ if (overloaded) {
+ // The (overloaded) call failed; if the call has at least one argument that
+ // permits conversion (i.e. it hasn't been explicitly specified `.noconvert()`)
+ // then add this call to the list of second pass overloads to try.
+ for (size_t i = func.is_method ? 1 : 0; i < pos_args; i++) {
+ if (second_pass_convert[i]) {
+ // Found one: swap the converting flags back in and store the call for
+ // the second pass.
+ call.args_convert.swap(second_pass_convert);
+ second_pass.push_back(std::move(call));
+ break;
+ }
+ }
+ }
+ }
+
+ if (overloaded && !second_pass.empty() && result.ptr() == PYBIND11_TRY_NEXT_OVERLOAD) {
+ // The no-conversion pass finished without success, try again with conversion allowed
+ for (auto &call : second_pass) {
+ try {
+ loader_life_support guard{};
+ result = call.func.impl(call);
+ } catch (reference_cast_error &) {
+ result = PYBIND11_TRY_NEXT_OVERLOAD;
+ }
+
+ if (result.ptr() != PYBIND11_TRY_NEXT_OVERLOAD) {
+ // The error reporting logic below expects 'it' to be valid, as it would be
+ // if we'd encountered this failure in the first-pass loop.
+ if (!result)
+ it = &call.func;
+ break;
+ }
+ }
+ }
+ } catch (error_already_set &e) {
+ e.restore();
+ return nullptr;
+#ifdef __GLIBCXX__
+ } catch ( abi::__forced_unwind& ) {
+ throw;
+#endif
+ } catch (...) {
+ /* When an exception is caught, give each registered exception
+ translator a chance to translate it to a Python exception
+ in reverse order of registration.
+
+ A translator may choose to do one of the following:
+
+ - catch the exception and call PyErr_SetString or PyErr_SetObject
+ to set a standard (or custom) Python exception, or
+ - do nothing and let the exception fall through to the next translator, or
+ - delegate translation to the next translator by throwing a new type of exception. */
+
+ auto last_exception = std::current_exception();
+ auto &registered_exception_translators = get_internals().registered_exception_translators;
+ for (auto& translator : registered_exception_translators) {
+ try {
+ translator(last_exception);
+ } catch (...) {
+ last_exception = std::current_exception();
+ continue;
+ }
+ return nullptr;
+ }
+ PyErr_SetString(PyExc_SystemError, "Exception escaped from default exception translator!");
+ return nullptr;
+ }
+
+ auto append_note_if_missing_header_is_suspected = [](std::string &msg) {
+ if (msg.find("std::") != std::string::npos) {
+ msg += "\n\n"
+ "Did you forget to `#include <pybind11/stl.h>`? Or <pybind11/complex.h>,\n"
+ "<pybind11/functional.h>, <pybind11/chrono.h>, etc. Some automatic\n"
+ "conversions are optional and require extra headers to be included\n"
+ "when compiling your pybind11 module.";
+ }
+ };
+
+ if (result.ptr() == PYBIND11_TRY_NEXT_OVERLOAD) {
+ if (overloads->is_operator)
+ return handle(Py_NotImplemented).inc_ref().ptr();
+
+ std::string msg = std::string(overloads->name) + "(): incompatible " +
+ std::string(overloads->is_constructor ? "constructor" : "function") +
+ " arguments. The following argument types are supported:\n";
+
+ int ctr = 0;
+ for (const function_record *it2 = overloads; it2 != nullptr; it2 = it2->next) {
+ msg += " "+ std::to_string(++ctr) + ". ";
+
+ bool wrote_sig = false;
+ if (overloads->is_constructor) {
+ // For a constructor, rewrite `(self: Object, arg0, ...) -> NoneType` as `Object(arg0, ...)`
+ std::string sig = it2->signature;
+ size_t start = sig.find('(') + 7; // skip "(self: "
+ if (start < sig.size()) {
+ // End at the , for the next argument
+ size_t end = sig.find(", "), next = end + 2;
+ size_t ret = sig.rfind(" -> ");
+ // Or the ), if there is no comma:
+ if (end >= sig.size()) next = end = sig.find(')');
+ if (start < end && next < sig.size()) {
+ msg.append(sig, start, end - start);
+ msg += '(';
+ msg.append(sig, next, ret - next);
+ wrote_sig = true;
+ }
+ }
+ }
+ if (!wrote_sig) msg += it2->signature;
+
+ msg += "\n";
+ }
+ msg += "\nInvoked with: ";
+ auto args_ = reinterpret_borrow<tuple>(args_in);
+ bool some_args = false;
+ for (size_t ti = overloads->is_constructor ? 1 : 0; ti < args_.size(); ++ti) {
+ if (!some_args) some_args = true;
+ else msg += ", ";
+ try {
+ msg += pybind11::repr(args_[ti]);
+ } catch (const error_already_set&) {
+ msg += "<repr raised Error>";
+ }
+ }
+ if (kwargs_in) {
+ auto kwargs = reinterpret_borrow<dict>(kwargs_in);
+ if (!kwargs.empty()) {
+ if (some_args) msg += "; ";
+ msg += "kwargs: ";
+ bool first = true;
+ for (auto kwarg : kwargs) {
+ if (first) first = false;
+ else msg += ", ";
+ msg += pybind11::str("{}=").format(kwarg.first);
+ try {
+ msg += pybind11::repr(kwarg.second);
+ } catch (const error_already_set&) {
+ msg += "<repr raised Error>";
+ }
+ }
+ }
+ }
+
+ append_note_if_missing_header_is_suspected(msg);
+ PyErr_SetString(PyExc_TypeError, msg.c_str());
+ return nullptr;
+ } else if (!result) {
+ std::string msg = "Unable to convert function return value to a "
+ "Python type! The signature was\n\t";
+ msg += it->signature;
+ append_note_if_missing_header_is_suspected(msg);
+ PyErr_SetString(PyExc_TypeError, msg.c_str());
+ return nullptr;
+ } else {
+ if (overloads->is_constructor && !self_value_and_holder.holder_constructed()) {
+ auto *pi = reinterpret_cast<instance *>(parent.ptr());
+ self_value_and_holder.type->init_instance(pi, nullptr);
+ }
+ return result.ptr();
+ }
+ }
+};
+
+/// Wrapper for Python extension modules
+class module_ : public object {
+public:
+ PYBIND11_OBJECT_DEFAULT(module_, object, PyModule_Check)
+
+ /// Create a new top-level Python module with the given name and docstring
+ PYBIND11_DEPRECATED("Use PYBIND11_MODULE or module_::create_extension_module instead")
+ explicit module_(const char *name, const char *doc = nullptr) {
+#if PY_MAJOR_VERSION >= 3
+ *this = create_extension_module(name, doc, new PyModuleDef());
+#else
+ *this = create_extension_module(name, doc, nullptr);
+#endif
+ }
+
+ /** \rst
+ Create Python binding for a new function within the module scope. ``Func``
+ can be a plain C++ function, a function pointer, or a lambda function. For
+ details on the ``Extra&& ... extra`` argument, see section :ref:`extras`.
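+
+ A minimal usage sketch (the module handle ``m`` and the free function ``add``
+ below are illustrative, not part of this header):
+
+ .. code-block:: cpp
+
+ int add(int a, int b) { return a + b; }
+ m.def("add", &add, "A function that adds two numbers");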
+ \endrst */
+ template <typename Func, typename... Extra>
+ module_ &def(const char *name_, Func &&f, const Extra& ... extra) {
+ cpp_function func(std::forward<Func>(f), name(name_), scope(*this),
+ sibling(getattr(*this, name_, none())), extra...);
+ // NB: allow overwriting here because cpp_function sets up a chain with the intention of
+ // overwriting (and has already checked internally that it isn't overwriting non-functions).
+ add_object(name_, func, true /* overwrite */);
+ return *this;
+ }
+
+ /** \rst
+ Create and return a new Python submodule with the given name and docstring.
+ This also works recursively, i.e.
+
+ .. code-block:: cpp
+
+ py::module_ m("example", "pybind11 example plugin");
+ py::module_ m2 = m.def_submodule("sub", "A submodule of 'example'");
+ py::module_ m3 = m2.def_submodule("subsub", "A submodule of 'example.sub'");
+ \endrst */
+ module_ def_submodule(const char *name, const char *doc = nullptr) {
+ std::string full_name = std::string(PyModule_GetName(m_ptr))
+ + std::string(".") + std::string(name);
+ auto result = reinterpret_borrow<module_>(PyImport_AddModule(full_name.c_str()));
+ if (doc && options::show_user_defined_docstrings())
+ result.attr("__doc__") = pybind11::str(doc);
+ attr(name) = result;
+ return result;
+ }
+
+ /// Import and return a module, or throw `error_already_set` on failure.
+ static module_ import(const char *name) {
+ PyObject *obj = PyImport_ImportModule(name);
+ if (!obj)
+ throw error_already_set();
+ return reinterpret_steal<module_>(obj);
+ }
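+
+ // A usage sketch; "sys" is just an example module name:
+ //
+ // py::module_ sys = py::module_::import("sys");
+ // py::object path = sys.attr("path");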
+
+ /// Reload the module, or throw `error_already_set` on failure.
+ void reload() {
+ PyObject *obj = PyImport_ReloadModule(ptr());
+ if (!obj)
+ throw error_already_set();
+ *this = reinterpret_steal<module_>(obj);
+ }
+
+ /** \rst
+ Adds an object to the module using the given name. Throws if an object with the given name
+ already exists.
+
+ ``overwrite`` should almost always be false: attempting to overwrite objects that pybind11 has
+ established will, in most cases, break things.
+ \endrst */
+ PYBIND11_NOINLINE void add_object(const char *name, handle obj, bool overwrite = false) {
+ if (!overwrite && hasattr(*this, name))
+ pybind11_fail("Error during initialization: multiple incompatible definitions with name \"" +
+ std::string(name) + "\"");
+
+ PyModule_AddObject(ptr(), name, obj.inc_ref().ptr() /* steals a reference */);
+ }
+
+#if PY_MAJOR_VERSION >= 3
+ using module_def = PyModuleDef;
+#else
+ struct module_def {};
+#endif
+
+ /** \rst
+ Create a new top-level module that can be used as the main module of a C extension.
+
+ For Python 3, ``def`` should point to a statically allocated module_def.
+ For Python 2, ``def`` can be a nullptr and is completely ignored.
+ \endrst */
+ static module_ create_extension_module(const char *name, const char *doc, module_def *def) {
+#if PY_MAJOR_VERSION >= 3
+ // module_def is PyModuleDef
+ def = new (def) PyModuleDef { // Placement new (not an allocation).
+ /* m_base */ PyModuleDef_HEAD_INIT,
+ /* m_name */ name,
+ /* m_doc */ options::show_user_defined_docstrings() ? doc : nullptr,
+ /* m_size */ -1,
+ /* m_methods */ nullptr,
+ /* m_slots */ nullptr,
+ /* m_traverse */ nullptr,
+ /* m_clear */ nullptr,
+ /* m_free */ nullptr
+ };
+ auto m = PyModule_Create(def);
+#else
+ // Ignore module_def *def; only necessary for Python 3
+ (void) def;
+ auto m = Py_InitModule3(name, nullptr, options::show_user_defined_docstrings() ? doc : nullptr);
+#endif
+ if (m == nullptr) {
+ if (PyErr_Occurred())
+ throw error_already_set();
+ pybind11_fail("Internal error in module_::create_extension_module()");
+ }
+ // TODO: Should be reinterpret_steal for Python 3, but Python also steals it again when returned from PyInit_...
+ // For Python 2, reinterpret_borrow is correct.
+ return reinterpret_borrow<module_>(m);
+ }
+};
+
+// When inside a namespace (or anywhere as long as it's not the first item on a line),
+// C++20 still allows "module" to be used as an identifier. This alias is provided for
+// backward compatibility and for convenience: using py::module, for example, is perfectly safe.
+using module = module_;
+
+/// \ingroup python_builtins
+/// Return a dictionary representing the global variables in the current execution frame,
+/// or ``__main__.__dict__`` if there is no frame (usually when the interpreter is embedded).
+inline dict globals() {
+ PyObject *p = PyEval_GetGlobals();
+ return reinterpret_borrow<dict>(p ? p : module_::import("__main__").attr("__dict__").ptr());
+}
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+/// Generic support for creating new Python heap types
+class generic_type : public object {
+public:
+ PYBIND11_OBJECT_DEFAULT(generic_type, object, PyType_Check)
+protected:
+ void initialize(const type_record &rec) {
+ if (rec.scope && hasattr(rec.scope, "__dict__") && rec.scope.attr("__dict__").contains(rec.name))
+ pybind11_fail("generic_type: cannot initialize type \"" + std::string(rec.name) +
+ "\": an object with that name is already defined");
+
+ if (rec.module_local ? get_local_type_info(*rec.type) : get_global_type_info(*rec.type))
+ pybind11_fail("generic_type: type \"" + std::string(rec.name) +
+ "\" is already registered!");
+
+ m_ptr = make_new_python_type(rec);
+
+ /* Register supplemental type information in C++ dict */
+ auto *tinfo = new detail::type_info();
+ tinfo->type = (PyTypeObject *) m_ptr;
+ tinfo->cpptype = rec.type;
+ tinfo->type_size = rec.type_size;
+ tinfo->type_align = rec.type_align;
+ tinfo->operator_new = rec.operator_new;
+ tinfo->holder_size_in_ptrs = size_in_ptrs(rec.holder_size);
+ tinfo->init_instance = rec.init_instance;
+ tinfo->dealloc = rec.dealloc;
+ tinfo->simple_type = true;
+ tinfo->simple_ancestors = true;
+ tinfo->default_holder = rec.default_holder;
+ tinfo->module_local = rec.module_local;
+
+ auto &internals = get_internals();
+ auto tindex = std::type_index(*rec.type);
+ tinfo->direct_conversions = &internals.direct_conversions[tindex];
+ if (rec.module_local)
+ registered_local_types_cpp()[tindex] = tinfo;
+ else
+ internals.registered_types_cpp[tindex] = tinfo;
+ internals.registered_types_py[(PyTypeObject *) m_ptr] = { tinfo };
+
+ if (rec.bases.size() > 1 || rec.multiple_inheritance) {
+ mark_parents_nonsimple(tinfo->type);
+ tinfo->simple_ancestors = false;
+ }
+ else if (rec.bases.size() == 1) {
+ auto parent_tinfo = get_type_info((PyTypeObject *) rec.bases[0].ptr());
+ tinfo->simple_ancestors = parent_tinfo->simple_ancestors;
+ }
+
+ if (rec.module_local) {
+ // Stash the local typeinfo and loader so that external modules can access it.
+ tinfo->module_local_load = &type_caster_generic::local_load;
+ setattr(m_ptr, PYBIND11_MODULE_LOCAL_ID, capsule(tinfo));
+ }
+ }
+
+ /// Helper function which tags all parents of a type using mult. inheritance
+ void mark_parents_nonsimple(PyTypeObject *value) {
+ auto t = reinterpret_borrow<tuple>(value->tp_bases);
+ for (handle h : t) {
+ auto tinfo2 = get_type_info((PyTypeObject *) h.ptr());
+ if (tinfo2)
+ tinfo2->simple_type = false;
+ mark_parents_nonsimple((PyTypeObject *) h.ptr());
+ }
+ }
+
+ void install_buffer_funcs(
+ buffer_info *(*get_buffer)(PyObject *, void *),
+ void *get_buffer_data) {
+ auto *type = (PyHeapTypeObject*) m_ptr;
+ auto tinfo = detail::get_type_info(&type->ht_type);
+
+ if (!type->ht_type.tp_as_buffer)
+ pybind11_fail(
+ "To be able to register buffer protocol support for the type '" +
+ get_fully_qualified_tp_name(tinfo->type) +
+ "' the associated class<>(..) invocation must "
+ "include the pybind11::buffer_protocol() annotation!");
+
+ tinfo->get_buffer = get_buffer;
+ tinfo->get_buffer_data = get_buffer_data;
+ }
+
+ // rec_func must be set for either fget or fset.
+ void def_property_static_impl(const char *name,
+ handle fget, handle fset,
+ detail::function_record *rec_func) {
+ const auto is_static = rec_func && !(rec_func->is_method && rec_func->scope);
+ const auto has_doc = rec_func && rec_func->doc && pybind11::options::show_user_defined_docstrings();
+ auto property = handle((PyObject *) (is_static ? get_internals().static_property_type
+ : &PyProperty_Type));
+ attr(name) = property(fget.ptr() ? fget : none(),
+ fset.ptr() ? fset : none(),
+ /*deleter*/none(),
+ pybind11::str(has_doc ? rec_func->doc : ""));
+ }
+};
+
+/// Set the pointer to operator new if it exists. The cast is needed because it can be overloaded.
+template <typename T, typename = void_t<decltype(static_cast<void *(*)(size_t)>(T::operator new))>>
+void set_operator_new(type_record *r) { r->operator_new = &T::operator new; }
+
+template <typename> void set_operator_new(...) { }
+
+template <typename T, typename SFINAE = void> struct has_operator_delete : std::false_type { };
+template <typename T> struct has_operator_delete<T, void_t<decltype(static_cast<void (*)(void *)>(T::operator delete))>>
+ : std::true_type { };
+template <typename T, typename SFINAE = void> struct has_operator_delete_size : std::false_type { };
+template <typename T> struct has_operator_delete_size<T, void_t<decltype(static_cast<void (*)(void *, size_t)>(T::operator delete))>>
+ : std::true_type { };
+/// Call class-specific delete if it exists or global otherwise. Can also be an overload set.
+template <typename T, enable_if_t<has_operator_delete<T>::value, int> = 0>
+void call_operator_delete(T *p, size_t, size_t) { T::operator delete(p); }
+template <typename T, enable_if_t<!has_operator_delete<T>::value && has_operator_delete_size<T>::value, int> = 0>
+void call_operator_delete(T *p, size_t s, size_t) { T::operator delete(p, s); }
+
+inline void call_operator_delete(void *p, size_t s, size_t a) {
+ (void)s; (void)a;
+ #if defined(__cpp_aligned_new) && (!defined(_MSC_VER) || _MSC_VER >= 1912)
+ if (a > __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
+ #ifdef __cpp_sized_deallocation
+ ::operator delete(p, s, std::align_val_t(a));
+ #else
+ ::operator delete(p, std::align_val_t(a));
+ #endif
+ return;
+ }
+ #endif
+ #ifdef __cpp_sized_deallocation
+ ::operator delete(p, s);
+ #else
+ ::operator delete(p);
+ #endif
+}
+
+inline void add_class_method(object& cls, const char *name_, const cpp_function &cf) {
+ cls.attr(cf.name()) = cf;
+ if (strcmp(name_, "__eq__") == 0 && !cls.attr("__dict__").contains("__hash__")) {
+ cls.attr("__hash__") = none();
+ }
+}
+
+PYBIND11_NAMESPACE_END(detail)
+
+/// Given a pointer to a member function, cast it to its `Derived` version.
+/// Forward everything else unchanged.
+template <typename /*Derived*/, typename F>
+auto method_adaptor(F &&f) -> decltype(std::forward<F>(f)) { return std::forward<F>(f); }
+
+template <typename Derived, typename Return, typename Class, typename... Args>
+auto method_adaptor(Return (Class::*pmf)(Args...)) -> Return (Derived::*)(Args...) {
+ static_assert(detail::is_accessible_base_of<Class, Derived>::value,
+ "Cannot bind an inaccessible base class method; use a lambda definition instead");
+ return pmf;
+}
+
+template <typename Derived, typename Return, typename Class, typename... Args>
+auto method_adaptor(Return (Class::*pmf)(Args...) const) -> Return (Derived::*)(Args...) const {
+ static_assert(detail::is_accessible_base_of<Class, Derived>::value,
+ "Cannot bind an inaccessible base class method; use a lambda definition instead");
+ return pmf;
+}
+
+template <typename type_, typename... options>
+class class_ : public detail::generic_type {
+ template <typename T> using is_holder = detail::is_holder_type<type_, T>;
+ template <typename T> using is_subtype = detail::is_strict_base_of<type_, T>;
+ template <typename T> using is_base = detail::is_strict_base_of<T, type_>;
+ // struct instead of using here to help MSVC:
+ template <typename T> struct is_valid_class_option :
+ detail::any_of<is_holder<T>, is_subtype<T>, is_base<T>> {};
+
+public:
+ using type = type_;
+ using type_alias = detail::exactly_one_t<is_subtype, void, options...>;
+ constexpr static bool has_alias = !std::is_void<type_alias>::value;
+ using holder_type = detail::exactly_one_t<is_holder, std::unique_ptr<type>, options...>;
+
+ static_assert(detail::all_of<is_valid_class_option<options>...>::value,
+ "Unknown/invalid class_ template parameters provided");
+
+ static_assert(!has_alias || std::is_polymorphic<type>::value,
+ "Cannot use an alias class with a non-polymorphic type");
+
+ PYBIND11_OBJECT(class_, generic_type, PyType_Check)
+
+ template <typename... Extra>
+ class_(handle scope, const char *name, const Extra &... extra) {
+ using namespace detail;
+
+ // MI can only be specified via class_ template options, not constructor parameters
+ static_assert(
+ none_of<is_pyobject<Extra>...>::value || // no base class arguments, or:
+ ( constexpr_sum(is_pyobject<Extra>::value...) == 1 && // Exactly one base
+ constexpr_sum(is_base<options>::value...) == 0 && // no template option bases
+ none_of<std::is_same<multiple_inheritance, Extra>...>::value), // no multiple_inheritance attr
+ "Error: multiple inheritance bases must be specified via class_ template options");
+
+ type_record record;
+ record.scope = scope;
+ record.name = name;
+ record.type = &typeid(type);
+ record.type_size = sizeof(conditional_t<has_alias, type_alias, type>);
+ record.type_align = alignof(conditional_t<has_alias, type_alias, type>&);
+ record.holder_size = sizeof(holder_type);
+ record.init_instance = init_instance;
+ record.dealloc = dealloc;
+ record.default_holder = detail::is_instantiation<std::unique_ptr, holder_type>::value;
+
+ set_operator_new<type>(&record);
+
+ /* Register base classes specified via template arguments to class_, if any */
+ PYBIND11_EXPAND_SIDE_EFFECTS(add_base<options>(record));
+
+ /* Process optional arguments, if any */
+ process_attributes<Extra...>::init(extra..., &record);
+
+ generic_type::initialize(record);
+
+ if (has_alias) {
+ auto &instances = record.module_local ? registered_local_types_cpp() : get_internals().registered_types_cpp;
+ instances[std::type_index(typeid(type_alias))] = instances[std::type_index(typeid(type))];
+ }
+ }
+
+ template <typename Base, detail::enable_if_t<is_base<Base>::value, int> = 0>
+ static void add_base(detail::type_record &rec) {
+ rec.add_base(typeid(Base), [](void *src) -> void * {
+ return static_cast<Base *>(reinterpret_cast<type *>(src));
+ });
+ }
+
+ template <typename Base, detail::enable_if_t<!is_base<Base>::value, int> = 0>
+ static void add_base(detail::type_record &) { }
+
+ template <typename Func, typename... Extra>
+ class_ &def(const char *name_, Func&& f, const Extra&... extra) {
+ cpp_function cf(method_adaptor<type>(std::forward<Func>(f)), name(name_), is_method(*this),
+ sibling(getattr(*this, name_, none())), extra...);
+ add_class_method(*this, name_, cf);
+ return *this;
+ }
+
+ template <typename Func, typename... Extra> class_ &
+ def_static(const char *name_, Func &&f, const Extra&... extra) {
+ static_assert(!std::is_member_function_pointer<Func>::value,
+ "def_static(...) called with a non-static member function pointer");
+ cpp_function cf(std::forward<Func>(f), name(name_), scope(*this),
+ sibling(getattr(*this, name_, none())), extra...);
+ attr(cf.name()) = staticmethod(cf);
+ return *this;
+ }
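+
+ // A sketch of def()/def_static(); the Pet class, its members, and the module
+ // handle `m` are illustrative assumptions, not part of this header:
+ //
+ // py::class_<Pet>(m, "Pet")
+ // .def(py::init<const std::string &>())
+ // .def("name", &Pet::name)
+ // .def_static("species", &Pet::species);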
+
+ template <detail::op_id id, detail::op_type ot, typename L, typename R, typename... Extra>
+ class_ &def(const detail::op_<id, ot, L, R> &op, const Extra&... extra) {
+ op.execute(*this, extra...);
+ return *this;
+ }
+
+ template <detail::op_id id, detail::op_type ot, typename L, typename R, typename... Extra>
+ class_ & def_cast(const detail::op_<id, ot, L, R> &op, const Extra&... extra) {
+ op.execute_cast(*this, extra...);
+ return *this;
+ }
+
+ template <typename... Args, typename... Extra>
+ class_ &def(const detail::initimpl::constructor<Args...> &init, const Extra&... extra) {
+ init.execute(*this, extra...);
+ return *this;
+ }
+
+ template <typename... Args, typename... Extra>
+ class_ &def(const detail::initimpl::alias_constructor<Args...> &init, const Extra&... extra) {
+ init.execute(*this, extra...);
+ return *this;
+ }
+
+ template <typename... Args, typename... Extra>
+ class_ &def(detail::initimpl::factory<Args...> &&init, const Extra&... extra) {
+ std::move(init).execute(*this, extra...);
+ return *this;
+ }
+
+ template <typename... Args, typename... Extra>
+ class_ &def(detail::initimpl::pickle_factory<Args...> &&pf, const Extra &...extra) {
+ std::move(pf).execute(*this, extra...);
+ return *this;
+ }
+
+ template <typename Func>
+ class_& def_buffer(Func &&func) {
+ struct capture { Func func; };
+ auto *ptr = new capture { std::forward<Func>(func) };
+ install_buffer_funcs([](PyObject *obj, void *ptr) -> buffer_info* {
+ detail::make_caster<type> caster;
+ if (!caster.load(obj, false))
+ return nullptr;
+ return new buffer_info(((capture *) ptr)->func(caster));
+ }, ptr);
+ weakref(m_ptr, cpp_function([ptr](handle wr) {
+ delete ptr;
+ wr.dec_ref();
+ })).release();
+ return *this;
+ }
+
+ template <typename Return, typename Class, typename... Args>
+ class_ &def_buffer(Return (Class::*func)(Args...)) {
+ return def_buffer([func] (type &obj) { return (obj.*func)(); });
+ }
+
+ template <typename Return, typename Class, typename... Args>
+ class_ &def_buffer(Return (Class::*func)(Args...) const) {
+ return def_buffer([func] (const type &obj) { return (obj.*func)(); });
+ }
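+
+ // A def_buffer() sketch; the Matrix type and its data()/rows()/cols()
+ // accessors are illustrative assumptions:
+ //
+ // py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
+ // .def_buffer([](Matrix &mat) -> py::buffer_info {
+ // return py::buffer_info(
+ // mat.data(), // pointer to the underlying storage
+ // sizeof(float), // size of one element
+ // py::format_descriptor<float>::format(),
+ // 2, // number of dimensions
+ // { mat.rows(), mat.cols() }, // shape
+ // { sizeof(float) * mat.cols(), sizeof(float) }); // strides (row-major)
+ // });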
+
+ template <typename C, typename D, typename... Extra>
+ class_ &def_readwrite(const char *name, D C::*pm, const Extra&... extra) {
+ static_assert(std::is_same<C, type>::value || std::is_base_of<C, type>::value, "def_readwrite() requires a class member (or base class member)");
+ cpp_function fget([pm](const type &c) -> const D &{ return c.*pm; }, is_method(*this)),
+ fset([pm](type &c, const D &value) { c.*pm = value; }, is_method(*this));
+ def_property(name, fget, fset, return_value_policy::reference_internal, extra...);
+ return *this;
+ }
+
+ template <typename C, typename D, typename... Extra>
+ class_ &def_readonly(const char *name, const D C::*pm, const Extra& ...extra) {
+ static_assert(std::is_same<C, type>::value || std::is_base_of<C, type>::value, "def_readonly() requires a class member (or base class member)");
+ cpp_function fget([pm](const type &c) -> const D &{ return c.*pm; }, is_method(*this));
+ def_property_readonly(name, fget, return_value_policy::reference_internal, extra...);
+ return *this;
+ }
+
+ template <typename D, typename... Extra>
+ class_ &def_readwrite_static(const char *name, D *pm, const Extra& ...extra) {
+ cpp_function fget([pm](object) -> const D &{ return *pm; }, scope(*this)),
+ fset([pm](object, const D &value) { *pm = value; }, scope(*this));
+ def_property_static(name, fget, fset, return_value_policy::reference, extra...);
+ return *this;
+ }
+
+ template <typename D, typename... Extra>
+ class_ &def_readonly_static(const char *name, const D *pm, const Extra& ...extra) {
+ cpp_function fget([pm](object) -> const D &{ return *pm; }, scope(*this));
+ def_property_readonly_static(name, fget, return_value_policy::reference, extra...);
+ return *this;
+ }
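+
+ // A sketch of the field helpers; the Pet struct with a public `name` member
+ // and a static `count` member is an illustrative assumption:
+ //
+ // py::class_<Pet>(m, "Pet")
+ // .def(py::init<>())
+ // .def_readwrite("name", &Pet::name)
+ // .def_readonly_static("count", &Pet::count);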
+
+ /// Uses return_value_policy::reference_internal by default
+ template <typename Getter, typename... Extra>
+ class_ &def_property_readonly(const char *name, const Getter &fget, const Extra& ...extra) {
+ return def_property_readonly(name, cpp_function(method_adaptor<type>(fget)),
+ return_value_policy::reference_internal, extra...);
+ }
+
+ /// Uses cpp_function's return_value_policy by default
+ template <typename... Extra>
+ class_ &def_property_readonly(const char *name, const cpp_function &fget, const Extra& ...extra) {
+ return def_property(name, fget, nullptr, extra...);
+ }
+
+ /// Uses return_value_policy::reference by default
+ template <typename Getter, typename... Extra>
+ class_ &def_property_readonly_static(const char *name, const Getter &fget, const Extra& ...extra) {
+ return def_property_readonly_static(name, cpp_function(fget), return_value_policy::reference, extra...);
+ }
+
+ /// Uses cpp_function's return_value_policy by default
+ template <typename... Extra>
+ class_ &def_property_readonly_static(const char *name, const cpp_function &fget, const Extra& ...extra) {
+ return def_property_static(name, fget, nullptr, extra...);
+ }
+
+ /// Uses return_value_policy::reference_internal by default
+ template <typename Getter, typename Setter, typename... Extra>
+ class_ &def_property(const char *name, const Getter &fget, const Setter &fset, const Extra& ...extra) {
+ return def_property(name, fget, cpp_function(method_adaptor<type>(fset)), extra...);
+ }
+ template <typename Getter, typename... Extra>
+ class_ &def_property(const char *name, const Getter &fget, const cpp_function &fset, const Extra& ...extra) {
+ return def_property(name, cpp_function(method_adaptor<type>(fget)), fset,
+ return_value_policy::reference_internal, extra...);
+ }
+
+ /// Uses cpp_function's return_value_policy by default
+ template <typename... Extra>
+ class_ &def_property(const char *name, const cpp_function &fget, const cpp_function &fset, const Extra& ...extra) {
+ return def_property_static(name, fget, fset, is_method(*this), extra...);
+ }
+
+ /// Uses return_value_policy::reference by default
+ template <typename Getter, typename... Extra>
+ class_ &def_property_static(const char *name, const Getter &fget, const cpp_function &fset, const Extra& ...extra) {
+ return def_property_static(name, cpp_function(fget), fset, return_value_policy::reference, extra...);
+ }
+
+ /// Uses cpp_function's return_value_policy by default
+ template <typename... Extra>
+ class_ &def_property_static(const char *name, const cpp_function &fget, const cpp_function &fset, const Extra& ...extra) {
+ static_assert( 0 == detail::constexpr_sum(std::is_base_of<arg, Extra>::value...),
+ "Argument annotations are not allowed for properties");
+ auto rec_fget = get_function_record(fget), rec_fset = get_function_record(fset);
+ auto *rec_active = rec_fget;
+ if (rec_fget) {
+ char *doc_prev = rec_fget->doc; /* 'extra' field may include a property-specific documentation string */
+ detail::process_attributes<Extra...>::init(extra..., rec_fget);
+ if (rec_fget->doc && rec_fget->doc != doc_prev) {
+ free(doc_prev);
+ rec_fget->doc = strdup(rec_fget->doc);
+ }
+ }
+ if (rec_fset) {
+ char *doc_prev = rec_fset->doc;
+ detail::process_attributes<Extra...>::init(extra..., rec_fset);
+ if (rec_fset->doc && rec_fset->doc != doc_prev) {
+ free(doc_prev);
+ rec_fset->doc = strdup(rec_fset->doc);
+ }
+ if (! rec_active) rec_active = rec_fset;
+ }
+ def_property_static_impl(name, fget, fset, rec_active);
+ return *this;
+ }
+
+private:
+ /// Initialize holder object, variant 1: object derives from enable_shared_from_this
+ template <typename T>
+ static void init_holder(detail::instance *inst, detail::value_and_holder &v_h,
+ const holder_type * /* unused */, const std::enable_shared_from_this<T> * /* dummy */) {
+ try {
+ auto sh = std::dynamic_pointer_cast<typename holder_type::element_type>(
+ v_h.value_ptr<type>()->shared_from_this());
+ if (sh) {
+ new (std::addressof(v_h.holder<holder_type>())) holder_type(std::move(sh));
+ v_h.set_holder_constructed();
+ }
+ } catch (const std::bad_weak_ptr &) {}
+
+ if (!v_h.holder_constructed() && inst->owned) {
+ new (std::addressof(v_h.holder<holder_type>())) holder_type(v_h.value_ptr<type>());
+ v_h.set_holder_constructed();
+ }
+ }
+
+ static void init_holder_from_existing(const detail::value_and_holder &v_h,
+ const holder_type *holder_ptr, std::true_type /*is_copy_constructible*/) {
+ new (std::addressof(v_h.holder<holder_type>())) holder_type(*reinterpret_cast<const holder_type *>(holder_ptr));
+ }
+
+ static void init_holder_from_existing(const detail::value_and_holder &v_h,
+ const holder_type *holder_ptr, std::false_type /*is_copy_constructible*/) {
+ new (std::addressof(v_h.holder<holder_type>())) holder_type(std::move(*const_cast<holder_type *>(holder_ptr)));
+ }
+
+ /// Initialize holder object, variant 2: try to construct from existing holder object, if possible
+ static void init_holder(detail::instance *inst, detail::value_and_holder &v_h,
+ const holder_type *holder_ptr, const void * /* dummy -- not enable_shared_from_this<T>) */) {
+ if (holder_ptr) {
+ init_holder_from_existing(v_h, holder_ptr, std::is_copy_constructible<holder_type>());
+ v_h.set_holder_constructed();
+ } else if (inst->owned || detail::always_construct_holder<holder_type>::value) {
+ new (std::addressof(v_h.holder<holder_type>())) holder_type(v_h.value_ptr<type>());
+ v_h.set_holder_constructed();
+ }
+ }
+
+ /// Performs instance initialization including constructing a holder and registering the known
+ /// instance. Should be called as soon as the `type` value_ptr is set for an instance. Takes an
+ /// optional pointer to an existing holder to use; if not specified and the instance is
+ /// `.owned`, a new holder will be constructed to manage the value pointer.
+ static void init_instance(detail::instance *inst, const void *holder_ptr) {
+ auto v_h = inst->get_value_and_holder(detail::get_type_info(typeid(type)));
+ if (!v_h.instance_registered()) {
+ register_instance(inst, v_h.value_ptr(), v_h.type);
+ v_h.set_instance_registered();
+ }
+ init_holder(inst, v_h, (const holder_type *) holder_ptr, v_h.value_ptr<type>());
+ }
+
+ /// Deallocates an instance; via holder, if constructed; otherwise via operator delete.
+ static void dealloc(detail::value_and_holder &v_h) {
+ // We could be deallocating because we are cleaning up after a Python exception.
+ // If so, the Python error indicator will be set. We need to clear that before
+ // running the destructor, in case the destructor code calls more Python.
+ // If we don't, the Python API will exit with an exception, and pybind11 will
+ // throw error_already_set from the C++ destructor which is forbidden and triggers
+ // std::terminate().
+ error_scope scope;
+ if (v_h.holder_constructed()) {
+ v_h.holder<holder_type>().~holder_type();
+ v_h.set_holder_constructed(false);
+ }
+ else {
+ detail::call_operator_delete(v_h.value_ptr<type>(),
+ v_h.type->type_size,
+ v_h.type->type_align
+ );
+ }
+ v_h.value_ptr() = nullptr;
+ }
+
+ static detail::function_record *get_function_record(handle h) {
+ h = detail::get_function(h);
+ return h ? (detail::function_record *) reinterpret_borrow<capsule>(PyCFunction_GET_SELF(h.ptr()))
+ : nullptr;
+ }
+};
+
+/// Binds an existing constructor taking arguments Args...
+template <typename... Args> detail::initimpl::constructor<Args...> init() { return {}; }
+/// Like `init<Args...>()`, but the instance is always constructed through the alias class (even
+/// when not inheriting on the Python side).
+template <typename... Args> detail::initimpl::alias_constructor<Args...> init_alias() { return {}; }
+
+/// Binds a factory function as a constructor
+template <typename Func, typename Ret = detail::initimpl::factory<Func>>
+Ret init(Func &&f) { return {std::forward<Func>(f)}; }
+
+/// Dual-argument factory function: the first function is called when no alias is needed, the second
+/// when an alias is needed (i.e. due to Python-side inheritance). Both must take identical arguments.
+template <typename CFunc, typename AFunc, typename Ret = detail::initimpl::factory<CFunc, AFunc>>
+Ret init(CFunc &&c, AFunc &&a) {
+ return {std::forward<CFunc>(c), std::forward<AFunc>(a)};
+}
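+
+// A factory-constructor sketch; the Example class and its int constructor are
+// illustrative assumptions:
+//
+// py::class_<Example>(m, "Example")
+// .def(py::init([](int value) { return new Example(value); }));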
+
+/// Binds pickling functions `__getstate__` and `__setstate__` and ensures that the type
+/// returned by `__getstate__` is the same as the argument accepted by `__setstate__`.
+template <typename GetState, typename SetState>
+detail::initimpl::pickle_factory<GetState, SetState> pickle(GetState &&g, SetState &&s) {
+ return {std::forward<GetState>(g), std::forward<SetState>(s)};
+}
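+
+// A py::pickle() sketch; the Pickleable class with a value() accessor and an
+// int constructor is an illustrative assumption:
+//
+// py::class_<Pickleable>(m, "Pickleable")
+// .def(py::init<int>())
+// .def(py::pickle(
+// [](const Pickleable &p) { return py::make_tuple(p.value()); }, // __getstate__
+// [](py::tuple t) { return Pickleable(t[0].cast<int>()); })); // __setstate__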
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+inline str enum_name(handle arg) {
+ dict entries = arg.get_type().attr("__entries");
+ for (auto kv : entries) {
+ if (handle(kv.second[int_(0)]).equal(arg))
+ return pybind11::str(kv.first);
+ }
+ return "???";
+}
+
+struct enum_base {
+ enum_base(handle base, handle parent) : m_base(base), m_parent(parent) { }
+
+ PYBIND11_NOINLINE void init(bool is_arithmetic, bool is_convertible) {
+ m_base.attr("__entries") = dict();
+ auto property = handle((PyObject *) &PyProperty_Type);
+ auto static_property = handle((PyObject *) get_internals().static_property_type);
+
+ m_base.attr("__repr__") = cpp_function(
+ [](object arg) -> str {
+ handle type = type::handle_of(arg);
+ object type_name = type.attr("__name__");
+ return pybind11::str("<{}.{}: {}>").format(type_name, enum_name(arg), int_(arg));
+ }, name("__repr__"), is_method(m_base)
+ );
+
+ m_base.attr("name") = property(cpp_function(&enum_name, name("name"), is_method(m_base)));
+
+ m_base.attr("__str__") = cpp_function(
+ [](handle arg) -> str {
+ object type_name = type::handle_of(arg).attr("__name__");
+ return pybind11::str("{}.{}").format(type_name, enum_name(arg));
+ }, name("__str__"), is_method(m_base)
+ );
+
+ m_base.attr("__doc__") = static_property(cpp_function(
+ [](handle arg) -> std::string {
+ std::string docstring;
+ dict entries = arg.attr("__entries");
+ if (((PyTypeObject *) arg.ptr())->tp_doc)
+ docstring += std::string(((PyTypeObject *) arg.ptr())->tp_doc) + "\n\n";
+ docstring += "Members:";
+ for (auto kv : entries) {
+ auto key = std::string(pybind11::str(kv.first));
+ auto comment = kv.second[int_(1)];
+ docstring += "\n\n " + key;
+ if (!comment.is_none())
+ docstring += " : " + (std::string) pybind11::str(comment);
+ }
+ return docstring;
+ }, name("__doc__")
+ ), none(), none(), "");
+
+ m_base.attr("__members__") = static_property(cpp_function(
+ [](handle arg) -> dict {
+ dict entries = arg.attr("__entries"), m;
+ for (auto kv : entries)
+ m[kv.first] = kv.second[int_(0)];
+ return m;
+ }, name("__members__")), none(), none(), ""
+ );
+
+ #define PYBIND11_ENUM_OP_STRICT(op, expr, strict_behavior) \
+ m_base.attr(op) = cpp_function( \
+ [](object a, object b) { \
+ if (!type::handle_of(a).is(type::handle_of(b))) \
+ strict_behavior; \
+ return expr; \
+ }, \
+ name(op), is_method(m_base), arg("other"))
+
+ #define PYBIND11_ENUM_OP_CONV(op, expr) \
+ m_base.attr(op) = cpp_function( \
+ [](object a_, object b_) { \
+ int_ a(a_), b(b_); \
+ return expr; \
+ }, \
+ name(op), is_method(m_base), arg("other"))
+
+ #define PYBIND11_ENUM_OP_CONV_LHS(op, expr) \
+ m_base.attr(op) = cpp_function( \
+ [](object a_, object b) { \
+ int_ a(a_); \
+ return expr; \
+ }, \
+ name(op), is_method(m_base), arg("other"))
+
+ if (is_convertible) {
+ PYBIND11_ENUM_OP_CONV_LHS("__eq__", !b.is_none() && a.equal(b));
+ PYBIND11_ENUM_OP_CONV_LHS("__ne__", b.is_none() || !a.equal(b));
+
+ if (is_arithmetic) {
+ PYBIND11_ENUM_OP_CONV("__lt__", a < b);
+ PYBIND11_ENUM_OP_CONV("__gt__", a > b);
+ PYBIND11_ENUM_OP_CONV("__le__", a <= b);
+ PYBIND11_ENUM_OP_CONV("__ge__", a >= b);
+ PYBIND11_ENUM_OP_CONV("__and__", a & b);
+ PYBIND11_ENUM_OP_CONV("__rand__", a & b);
+ PYBIND11_ENUM_OP_CONV("__or__", a | b);
+ PYBIND11_ENUM_OP_CONV("__ror__", a | b);
+ PYBIND11_ENUM_OP_CONV("__xor__", a ^ b);
+ PYBIND11_ENUM_OP_CONV("__rxor__", a ^ b);
+ m_base.attr("__invert__") = cpp_function(
+ [](object arg) { return ~(int_(arg)); }, name("__invert__"), is_method(m_base));
+ }
+ } else {
+ PYBIND11_ENUM_OP_STRICT("__eq__", int_(a).equal(int_(b)), return false);
+ PYBIND11_ENUM_OP_STRICT("__ne__", !int_(a).equal(int_(b)), return true);
+
+ if (is_arithmetic) {
+ #define PYBIND11_THROW throw type_error("Expected an enumeration of matching type!");
+ PYBIND11_ENUM_OP_STRICT("__lt__", int_(a) < int_(b), PYBIND11_THROW);
+ PYBIND11_ENUM_OP_STRICT("__gt__", int_(a) > int_(b), PYBIND11_THROW);
+ PYBIND11_ENUM_OP_STRICT("__le__", int_(a) <= int_(b), PYBIND11_THROW);
+ PYBIND11_ENUM_OP_STRICT("__ge__", int_(a) >= int_(b), PYBIND11_THROW);
+ #undef PYBIND11_THROW
+ }
+ }
+
+ #undef PYBIND11_ENUM_OP_CONV_LHS
+ #undef PYBIND11_ENUM_OP_CONV
+ #undef PYBIND11_ENUM_OP_STRICT
+
+ m_base.attr("__getstate__") = cpp_function(
+ [](object arg) { return int_(arg); }, name("__getstate__"), is_method(m_base));
+
+ m_base.attr("__hash__") = cpp_function(
+ [](object arg) { return int_(arg); }, name("__hash__"), is_method(m_base));
+ }
+
+ PYBIND11_NOINLINE void value(char const* name_, object value, const char *doc = nullptr) {
+ dict entries = m_base.attr("__entries");
+ str name(name_);
+ if (entries.contains(name)) {
+ std::string type_name = (std::string) str(m_base.attr("__name__"));
+ throw value_error(type_name + ": element \"" + std::string(name_) + "\" already exists!");
+ }
+
+ entries[name] = std::make_pair(value, doc);
+ m_base.attr(name) = value;
+ }
+
+ PYBIND11_NOINLINE void export_values() {
+ dict entries = m_base.attr("__entries");
+ for (auto kv : entries)
+ m_parent.attr(kv.first) = kv.second[int_(0)];
+ }
+
+ handle m_base;
+ handle m_parent;
+};
+
+PYBIND11_NAMESPACE_END(detail)
+
+/// Binds C++ enumerations and enumeration classes to Python
+template <typename Type> class enum_ : public class_<Type> {
+public:
+ using Base = class_<Type>;
+ using Base::def;
+ using Base::attr;
+ using Base::def_property_readonly;
+ using Base::def_property_readonly_static;
+ using Scalar = typename std::underlying_type<Type>::type;
+
+ template <typename... Extra>
+ enum_(const handle &scope, const char *name, const Extra&... extra)
+ : class_<Type>(scope, name, extra...), m_base(*this, scope) {
+ constexpr bool is_arithmetic = detail::any_of<std::is_same<arithmetic, Extra>...>::value;
+ constexpr bool is_convertible = std::is_convertible<Type, Scalar>::value;
+ m_base.init(is_arithmetic, is_convertible);
+
+ def(init([](Scalar i) { return static_cast<Type>(i); }), arg("value"));
+ def("__int__", [](Type value) { return (Scalar) value; });
+ #if PY_MAJOR_VERSION < 3
+ def("__long__", [](Type value) { return (Scalar) value; });
+ #endif
+ #if PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 8)
+ def("__index__", [](Type value) { return (Scalar) value; });
+ #endif
+
+ attr("__setstate__") = cpp_function(
+ [](detail::value_and_holder &v_h, Scalar arg) {
+ detail::initimpl::setstate<Base>(v_h, static_cast<Type>(arg),
+ Py_TYPE(v_h.inst) != v_h.type->type); },
+ detail::is_new_style_constructor(),
+ pybind11::name("__setstate__"), is_method(*this), arg("state"));
+ }
+
+ /// Export enumeration entries into the parent scope
+ enum_& export_values() {
+ m_base.export_values();
+ return *this;
+ }
+
+ /// Add an enumeration entry
+ enum_& value(char const* name, Type value, const char *doc = nullptr) {
+ m_base.value(name, pybind11::cast(value, return_value_policy::copy), doc);
+ return *this;
+ }
+
+private:
+ detail::enum_base m_base;
+};
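+
+// An enum_ usage sketch; the Pet::Kind enumeration and the module handle `m`
+// are illustrative assumptions:
+//
+// py::enum_<Pet::Kind>(m, "Kind")
+// .value("Dog", Pet::Kind::Dog)
+// .value("Cat", Pet::Kind::Cat)
+// .export_values();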
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+
+inline void keep_alive_impl(handle nurse, handle patient) {
+ if (!nurse || !patient)
+ pybind11_fail("Could not activate keep_alive!");
+
+ if (patient.is_none() || nurse.is_none())
+ return; /* Nothing to keep alive or nothing to be kept alive by */
+
+ auto tinfo = all_type_info(Py_TYPE(nurse.ptr()));
+ if (!tinfo.empty()) {
+ /* It's a pybind-registered type, so we can store the patient in the
+ * internal list. */
+ add_patient(nurse.ptr(), patient.ptr());
+ }
+ else {
+ /* Fall back to clever approach based on weak references taken from
+ * Boost.Python. This is not used for pybind-registered types because
+ * the objects can be destroyed out-of-order in a GC pass. */
+ cpp_function disable_lifesupport(
+ [patient](handle weakref) { patient.dec_ref(); weakref.dec_ref(); });
+
+ weakref wr(nurse, disable_lifesupport);
+
+ patient.inc_ref(); /* reference patient and leak the weak reference */
+ (void) wr.release();
+ }
+}
+
+PYBIND11_NOINLINE inline void keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret) {
+ auto get_arg = [&](size_t n) {
+ if (n == 0)
+ return ret;
+ else if (n == 1 && call.init_self)
+ return call.init_self;
+ else if (n <= call.args.size())
+ return call.args[n - 1];
+ return handle();
+ };
+
+ keep_alive_impl(get_arg(Nurse), get_arg(Patient));
+}
+
+inline std::pair<decltype(internals::registered_types_py)::iterator, bool> all_type_info_get_cache(PyTypeObject *type) {
+ auto res = get_internals().registered_types_py
+#ifdef __cpp_lib_unordered_map_try_emplace
+ .try_emplace(type);
+#else
+ .emplace(type, std::vector<detail::type_info *>());
+#endif
+ if (res.second) {
+ // New cache entry created; set up a weak reference to automatically remove it if the type
+ // gets destroyed:
+ weakref((PyObject *) type, cpp_function([type](handle wr) {
+ get_internals().registered_types_py.erase(type);
+ wr.dec_ref();
+ })).release();
+ }
+
+ return res;
+}
+
+template <typename Iterator, typename Sentinel, bool KeyIterator, return_value_policy Policy>
+struct iterator_state {
+ Iterator it;
+ Sentinel end;
+ bool first_or_done;
+};
+
+PYBIND11_NAMESPACE_END(detail)
+
+/// Makes a Python iterator from a first and past-the-end C++ InputIterator.
+template <return_value_policy Policy = return_value_policy::reference_internal,
+ typename Iterator,
+ typename Sentinel,
+ typename ValueType = decltype(*std::declval<Iterator>()),
+ typename... Extra>
+iterator make_iterator(Iterator first, Sentinel last, Extra &&... extra) {
+ using state = detail::iterator_state<Iterator, Sentinel, false, Policy>;
+
+ if (!detail::get_type_info(typeid(state), false)) {
+ class_<state>(handle(), "iterator", pybind11::module_local())
+ .def("__iter__", [](state &s) -> state& { return s; })
+ .def("__next__", [](state &s) -> ValueType {
+ if (!s.first_or_done)
+ ++s.it;
+ else
+ s.first_or_done = false;
+ if (s.it == s.end) {
+ s.first_or_done = true;
+ throw stop_iteration();
+ }
+ return *s.it;
+ }, std::forward<Extra>(extra)..., Policy);
+ }
+
+ return cast(state{first, last, true});
+}
+
+/// Makes a Python iterator over the keys (`.first`) of an iterator over pairs from a
+/// first and past-the-end InputIterator.
+template <return_value_policy Policy = return_value_policy::reference_internal,
+ typename Iterator,
+ typename Sentinel,
+ typename KeyType = decltype((*std::declval<Iterator>()).first),
+ typename... Extra>
+iterator make_key_iterator(Iterator first, Sentinel last, Extra &&... extra) {
+ using state = detail::iterator_state<Iterator, Sentinel, true, Policy>;
+
+ if (!detail::get_type_info(typeid(state), false)) {
+ class_<state>(handle(), "iterator", pybind11::module_local())
+ .def("__iter__", [](state &s) -> state& { return s; })
+ .def("__next__", [](state &s) -> KeyType {
+ if (!s.first_or_done)
+ ++s.it;
+ else
+ s.first_or_done = false;
+ if (s.it == s.end) {
+ s.first_or_done = true;
+ throw stop_iteration();
+ }
+ return (*s.it).first;
+ }, std::forward<Extra>(extra)..., Policy);
+ }
+
+ return cast(state{first, last, true});
+}
+
+/// Makes an iterator over the values of an STL container or other container supporting
+/// `std::begin()`/`std::end()`
+template <return_value_policy Policy = return_value_policy::reference_internal,
+ typename Type, typename... Extra> iterator make_iterator(Type &value, Extra&&... extra) {
+ return make_iterator<Policy>(std::begin(value), std::end(value), extra...);
+}
+
+/// Makes an iterator over the keys (`.first`) of an STL map-like container supporting
+/// `std::begin()`/`std::end()`
+template <return_value_policy Policy = return_value_policy::reference_internal,
+ typename Type, typename... Extra> iterator make_key_iterator(Type &value, Extra&&... extra) {
+ return make_key_iterator<Policy>(std::begin(value), std::end(value), extra...);
+}
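+
+// A make_iterator() sketch; the Sequence container with begin()/end() is an
+// illustrative assumption. keep_alive<0, 1>() ties the returned iterator's
+// lifetime to the sequence it traverses:
+//
+// py::class_<Sequence>(m, "Sequence")
+// .def("__iter__", [](Sequence &s) {
+// return py::make_iterator(s.begin(), s.end());
+// }, py::keep_alive<0, 1>());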
+
+template <typename InputType, typename OutputType> void implicitly_convertible() {
+ struct set_flag {
+ bool &flag;
+ set_flag(bool &flag) : flag(flag) { flag = true; }
+ ~set_flag() { flag = false; }
+ };
+ auto implicit_caster = [](PyObject *obj, PyTypeObject *type) -> PyObject * {
+ static bool currently_used = false;
+ if (currently_used) // implicit conversions are non-reentrant
+ return nullptr;
+ set_flag flag_helper(currently_used);
+ if (!detail::make_caster<InputType>().load(obj, false))
+ return nullptr;
+ tuple args(1);
+ args[0] = obj;
+ PyObject *result = PyObject_Call((PyObject *) type, args.ptr(), nullptr);
+ if (result == nullptr)
+ PyErr_Clear();
+ return result;
+ };
+
+ if (auto tinfo = detail::get_type_info(typeid(OutputType)))
+ tinfo->implicit_conversions.push_back(implicit_caster);
+ else
+ pybind11_fail("implicitly_convertible: Unable to find type " + type_id<OutputType>());
+}
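+
+// A usage sketch; types A and B are illustrative, and B is assumed to expose a
+// constructor taking an A so that the implicit conversion can succeed:
+//
+// py::class_<B>(m, "B")
+// .def(py::init<const A &>());
+// py::implicitly_convertible<A, B>();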
+
+template <typename ExceptionTranslator>
+void register_exception_translator(ExceptionTranslator&& translator) {
+ detail::get_internals().registered_exception_translators.push_front(
+ std::forward<ExceptionTranslator>(translator));
+}
+
+/**
+ * Wrapper to generate a new Python exception type.
+ *
+ * This should only be used with PyErr_SetString for now.
+ * It is not (yet) possible to use as a py::base.
+ * Template type argument is reserved for future use.
+ */
+template <typename type>
+class exception : public object {
+public:
+ exception() = default;
+ exception(handle scope, const char *name, handle base = PyExc_Exception) {
+ std::string full_name = scope.attr("__name__").cast<std::string>() +
+ std::string(".") + name;
+ m_ptr = PyErr_NewException(const_cast<char *>(full_name.c_str()), base.ptr(), NULL);
+ if (hasattr(scope, "__dict__") && scope.attr("__dict__").contains(name))
+ pybind11_fail("Error during initialization: multiple incompatible "
+ "definitions with name \"" + std::string(name) + "\"");
+ scope.attr(name) = *this;
+ }
+
+ // Sets the current python exception to this exception object with the given message
+ void operator()(const char *message) {
+ PyErr_SetString(m_ptr, message);
+ }
+};
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+// Returns a reference to a function-local static exception object used in the simple
+// register_exception approach below. (It would be simpler to have the static local variable
+// directly in register_exception, but that makes clang <3.5 segfault - issue #1349).
+template <typename CppException>
+exception<CppException> &get_exception_object() { static exception<CppException> ex; return ex; }
+PYBIND11_NAMESPACE_END(detail)
+
+/**
+ * Registers a Python exception in the given `scope` under the given `name` and installs an
+ * exception translator that translates the C++ exception to the created Python exception using
+ * the exception's what() method.
+ * This is intended for simple exception translations; for more complex translation, register the
+ * exception object and translator directly.
+ */
+template <typename CppException>
+exception<CppException> &register_exception(handle scope,
+ const char *name,
+ handle base = PyExc_Exception) {
+ auto &ex = detail::get_exception_object<CppException>();
+ if (!ex) ex = exception<CppException>(scope, name, base);
+
+ register_exception_translator([](std::exception_ptr p) {
+ if (!p) return;
+ try {
+ std::rethrow_exception(p);
+ } catch (const CppException &e) {
+ detail::get_exception_object<CppException>()(e.what());
+ }
+ });
+ return ex;
+}
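+
+// A usage sketch; MyCppException is an illustrative C++ exception type derived
+// from std::exception:
+//
+// py::register_exception<MyCppException>(m, "MyPythonError");
+// // C++ code throwing MyCppException now raises MyPythonError in Python.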
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+PYBIND11_NOINLINE inline void print(tuple args, dict kwargs) {
+ auto strings = tuple(args.size());
+ for (size_t i = 0; i < args.size(); ++i) {
+ strings[i] = str(args[i]);
+ }
+ auto sep = kwargs.contains("sep") ? kwargs["sep"] : cast(" ");
+ auto line = sep.attr("join")(strings);
+
+ object file;
+ if (kwargs.contains("file")) {
+ file = kwargs["file"].cast<object>();
+ } else {
+ try {
+ file = module_::import("sys").attr("stdout");
+ } catch (const error_already_set &) {
+ /* If print() is called from code that is executed as
+ part of garbage collection during interpreter shutdown,
+ importing 'sys' can fail. Give up rather than crashing the
+ interpreter in this case. */
+ return;
+ }
+ }
+
+ auto write = file.attr("write");
+ write(line);
+ write(kwargs.contains("end") ? kwargs["end"] : cast("\n"));
+
+ if (kwargs.contains("flush") && kwargs["flush"].cast<bool>())
+ file.attr("flush")();
+}
+PYBIND11_NAMESPACE_END(detail)
+
+template <return_value_policy policy = return_value_policy::automatic_reference, typename... Args>
+void print(Args &&...args) {
+ auto c = detail::collect_arguments<policy>(std::forward<Args>(args)...);
+ detail::print(c.args(), c.kwargs());
+}
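+
+// A usage sketch of py::print(); it forwards to Python's print(), so keyword
+// arguments behave as they do in Python:
+//
+// py::print("Hello", "world", py::arg("sep") = ", ");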
+
+#if defined(WITH_THREAD) && !defined(PYPY_VERSION)
+
+/* The functions below essentially reproduce the PyGILState_* API using a RAII
+ * pattern, but there are a few important differences:
+ *
+ * 1. When acquiring the GIL from a non-main thread during the finalization
+ * phase, the GILState API blindly terminates the calling thread, which
+ * is often not what is wanted. This API does not do this.
+ *
+ * 2. The gil_scoped_release function can optionally cut the relationship
+ * of a PyThreadState and its associated thread, which allows moving it to
+ * another thread (this is a fairly rare/advanced use case).
+ *
+ * 3. The reference count of an acquired thread state can be controlled. This
+ * can be handy to prevent cases where callbacks issued from an external
+ * thread would otherwise constantly construct and destroy thread state data
+ * structures.
+ *
+ * See the Python bindings of NanoGUI (http://github.com/wjakob/nanogui) for an
+ * example which uses features 2 and 3 to migrate the Python thread of
+ * execution to another thread (to run the event loop on the original thread,
+ * in this case).
+ */
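+
+/* A usage sketch (long_running_cpp_task() is an illustrative placeholder):
+ *
+ * m.def("compute", []() {
+ * py::gil_scoped_release release; // run pure C++ work without the GIL
+ * long_running_cpp_task();
+ * }); // the GIL is re-acquired when 'release' goes out of scope
+ */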
+
+class gil_scoped_acquire {
+public:
+ PYBIND11_NOINLINE gil_scoped_acquire() {
+ auto const &internals = detail::get_internals();
+ tstate = (PyThreadState *) PYBIND11_TLS_GET_VALUE(internals.tstate);
+
+ if (!tstate) {
+ /* Check if the GIL was acquired using the PyGILState_* API instead (e.g. if
+ calling from a Python thread). Since we use a different key, this ensures
+ we don't create a new thread state and deadlock in PyEval_AcquireThread
+ below. Note we don't save this state with internals.tstate, since we didn't
+ create it and would therefore fail to clear it (its reference count should be > 0). */
+ tstate = PyGILState_GetThisThreadState();
+ }
+
+ if (!tstate) {
+ tstate = PyThreadState_New(internals.istate);
+ #if !defined(NDEBUG)
+ if (!tstate)
+ pybind11_fail("scoped_acquire: could not create thread state!");
+ #endif
+ tstate->gilstate_counter = 0;
+ PYBIND11_TLS_REPLACE_VALUE(internals.tstate, tstate);
+ } else {
+ release = detail::get_thread_state_unchecked() != tstate;
+ }
+
+ if (release) {
+ /* Work around an annoying assertion in PyThreadState_Swap */
+ #if defined(Py_DEBUG)
+ PyInterpreterState *interp = tstate->interp;
+ tstate->interp = nullptr;
+ #endif
+ PyEval_AcquireThread(tstate);
+ #if defined(Py_DEBUG)
+ tstate->interp = interp;
+ #endif
+ }
+
+ inc_ref();
+ }
+
+ void inc_ref() {
+ ++tstate->gilstate_counter;
+ }
+
+ PYBIND11_NOINLINE void dec_ref() {
+ --tstate->gilstate_counter;
+ #if !defined(NDEBUG)
+ if (detail::get_thread_state_unchecked() != tstate)
+ pybind11_fail("scoped_acquire::dec_ref(): thread state must be current!");
+ if (tstate->gilstate_counter < 0)
+ pybind11_fail("scoped_acquire::dec_ref(): reference count underflow!");
+ #endif
+ if (tstate->gilstate_counter == 0) {
+ #if !defined(NDEBUG)
+ if (!release)
+ pybind11_fail("scoped_acquire::dec_ref(): internal error!");
+ #endif
+ PyThreadState_Clear(tstate);
+ PyThreadState_DeleteCurrent();
+ PYBIND11_TLS_DELETE_VALUE(detail::get_internals().tstate);
+ release = false;
+ }
+ }
+
+ PYBIND11_NOINLINE ~gil_scoped_acquire() {
+ dec_ref();
+ if (release)
+ PyEval_SaveThread();
+ }
+private:
+ PyThreadState *tstate = nullptr;
+ bool release = true;
+};
+
+class gil_scoped_release {
+public:
+ explicit gil_scoped_release(bool disassoc = false) : disassoc(disassoc) {
+ // `get_internals()` must be called here unconditionally in order to initialize
+ // `internals.tstate` for subsequent `gil_scoped_acquire` calls. Otherwise, an
+ // initialization race could occur as multiple threads try `gil_scoped_acquire`.
+ const auto &internals = detail::get_internals();
+ tstate = PyEval_SaveThread();
+ if (disassoc) {
+ auto key = internals.tstate;
+ PYBIND11_TLS_DELETE_VALUE(key);
+ }
+ }
+ ~gil_scoped_release() {
+ if (!tstate)
+ return;
+ PyEval_RestoreThread(tstate);
+ if (disassoc) {
+ auto key = detail::get_internals().tstate;
+ PYBIND11_TLS_REPLACE_VALUE(key, tstate);
+ }
+ }
+private:
+ PyThreadState *tstate;
+ bool disassoc;
+};
+#elif defined(PYPY_VERSION)
+class gil_scoped_acquire {
+ PyGILState_STATE state;
+public:
+ gil_scoped_acquire() { state = PyGILState_Ensure(); }
+ ~gil_scoped_acquire() { PyGILState_Release(state); }
+};
+
+class gil_scoped_release {
+ PyThreadState *state;
+public:
+ gil_scoped_release() { state = PyEval_SaveThread(); }
+ ~gil_scoped_release() { PyEval_RestoreThread(state); }
+};
+#else
+class gil_scoped_acquire { };
+class gil_scoped_release { };
+#endif
+
+error_already_set::~error_already_set() {
+ if (m_type) {
+ gil_scoped_acquire gil;
+ error_scope scope;
+ m_type.release().dec_ref();
+ m_value.release().dec_ref();
+ m_trace.release().dec_ref();
+ }
+}
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+inline function get_type_override(const void *this_ptr, const type_info *this_type, const char *name) {
+ handle self = get_object_handle(this_ptr, this_type);
+ if (!self)
+ return function();
+ handle type = type::handle_of(self);
+ auto key = std::make_pair(type.ptr(), name);
+
+ /* Cache functions that aren't overridden in Python to avoid
+ many costly Python dictionary lookups below */
+ auto &cache = get_internals().inactive_override_cache;
+ if (cache.find(key) != cache.end())
+ return function();
+
+ function override = getattr(self, name, function());
+ if (override.is_cpp_function()) {
+ cache.insert(key);
+ return function();
+ }
+
+ /* Don't call dispatch code if invoked from overridden function.
+ Unfortunately this doesn't work on PyPy. */
+#if !defined(PYPY_VERSION)
+ PyFrameObject *frame = PyThreadState_Get()->frame;
+ if (frame && (std::string) str(frame->f_code->co_name) == name &&
+ frame->f_code->co_argcount > 0) {
+ PyFrame_FastToLocals(frame);
+ PyObject *self_caller = PyDict_GetItem(
+ frame->f_locals, PyTuple_GET_ITEM(frame->f_code->co_varnames, 0));
+ if (self_caller == self.ptr())
+ return function();
+ }
+#else
+ /* PyPy currently doesn't provide a detailed cpyext emulation of
+ frame objects, so we have to emulate this using Python. This
+ is going to be slow..*/
+ dict d; d["self"] = self; d["name"] = pybind11::str(name);
+ PyObject *result = PyRun_String(
+ "import inspect\n"
+ "frame = inspect.currentframe()\n"
+ "if frame is not None:\n"
+ " frame = frame.f_back\n"
+ " if frame is not None and str(frame.f_code.co_name) == name and "
+ "frame.f_code.co_argcount > 0:\n"
+ " self_caller = frame.f_locals[frame.f_code.co_varnames[0]]\n"
+ " if self_caller == self:\n"
+ " self = None\n",
+ Py_file_input, d.ptr(), d.ptr());
+ if (result == nullptr)
+ throw error_already_set();
+ if (d["self"].is_none())
+ return function();
+ Py_DECREF(result);
+#endif
+
+ return override;
+}
+PYBIND11_NAMESPACE_END(detail)
+
+/** \rst
+ Try to retrieve a Python method with the provided name from the instance pointed to by ``this_ptr``.
+
+ :this_ptr: The pointer to the object the overridden method should be retrieved for. This should be
+ the first non-trampoline class encountered in the inheritance chain.
+ :name: The name of the overridden Python method to retrieve.
+ :return: The Python method by this name from the object or an empty function wrapper.
+ \endrst */
+template <class T> function get_override(const T *this_ptr, const char *name) {
+ auto tinfo = detail::get_type_info(typeid(T));
+ return tinfo ? detail::get_type_override(this_ptr, tinfo, name) : function();
+}
+
+#define PYBIND11_OVERRIDE_IMPL(ret_type, cname, name, ...) \
+ do { \
+ pybind11::gil_scoped_acquire gil; \
+ pybind11::function override = pybind11::get_override(static_cast<const cname *>(this), name); \
+ if (override) { \
+ auto o = override(__VA_ARGS__); \
+ if (pybind11::detail::cast_is_temporary_value_reference<ret_type>::value) { \
+ static pybind11::detail::override_caster_t<ret_type> caster; \
+ return pybind11::detail::cast_ref<ret_type>(std::move(o), caster); \
+ } \
+ else return pybind11::detail::cast_safe<ret_type>(std::move(o)); \
+ } \
+ } while (false)
+
+/** \rst
+ Macro to populate the virtual method in the trampoline class. This macro tries to look up a method named 'name'
+ from the Python side, deals with the :ref:`gil` and the necessary argument conversions to call this method, and returns
+ the appropriate type. See :ref:`overriding_virtuals` for more information. This macro should be used when the method
+ name in C++ is not the same as the method name in Python; for example, with `__str__`.
+
+ .. code-block:: cpp
+
+ std::string toString() override {
+ PYBIND11_OVERRIDE_NAME(
+ std::string, // Return type (ret_type)
+ Animal, // Parent class (cname)
+ "__str__", // Name of method in Python (name)
+ toString, // Name of function in C++ (fn)
+ );
+ }
+\endrst */
+#define PYBIND11_OVERRIDE_NAME(ret_type, cname, name, fn, ...) \
+ do { \
+ PYBIND11_OVERRIDE_IMPL(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__); \
+ return cname::fn(__VA_ARGS__); \
+ } while (false)
+
+/** \rst
+ Macro for pure virtual functions; this macro is identical to :c:macro:`PYBIND11_OVERRIDE_NAME`, except that it
+ throws if no override can be found.
+\endrst */
+#define PYBIND11_OVERRIDE_PURE_NAME(ret_type, cname, name, fn, ...) \
+ do { \
+ PYBIND11_OVERRIDE_IMPL(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__); \
+ pybind11::pybind11_fail("Tried to call pure virtual function \"" PYBIND11_STRINGIFY(cname) "::" name "\""); \
+ } while (false)
+
+/** \rst
+ Macro to populate the virtual method in the trampoline class. This macro tries to look up the method
+ from the Python side, deals with the :ref:`gil` and the necessary argument conversions to call this method, and returns
+ the appropriate type. This macro should be used if the method name in C++ and in Python are identical.
+ See :ref:`overriding_virtuals` for more information.
+
+ .. code-block:: cpp
+
+ class PyAnimal : public Animal {
+ public:
+ // Inherit the constructors
+ using Animal::Animal;
+
+ // Trampoline (need one for each virtual function)
+ std::string go(int n_times) override {
+ PYBIND11_OVERRIDE_PURE(
+ std::string, // Return type (ret_type)
+ Animal, // Parent class (cname)
+ go, // Name of function in C++ (must match Python name) (fn)
+ n_times // Argument(s) (...)
+ );
+ }
+ };
+\endrst */
+#define PYBIND11_OVERRIDE(ret_type, cname, fn, ...) \
+ PYBIND11_OVERRIDE_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), #fn, fn, __VA_ARGS__)
+
+/** \rst
+ Macro for pure virtual functions; it is identical to :c:macro:`PYBIND11_OVERRIDE`, except that it throws
+ if no override can be found.
+\endrst */
+#define PYBIND11_OVERRIDE_PURE(ret_type, cname, fn, ...) \
+ PYBIND11_OVERRIDE_PURE_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), #fn, fn, __VA_ARGS__)
+
+
+// Deprecated versions
+
+PYBIND11_DEPRECATED("get_type_overload has been deprecated")
+inline function get_type_overload(const void *this_ptr, const detail::type_info *this_type, const char *name) {
+ return detail::get_type_override(this_ptr, this_type, name);
+}
+
+template <class T>
+inline function get_overload(const T *this_ptr, const char *name) {
+ return get_override(this_ptr, name);
+}
+
+#define PYBIND11_OVERLOAD_INT(ret_type, cname, name, ...) \
+ PYBIND11_OVERRIDE_IMPL(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__)
+#define PYBIND11_OVERLOAD_NAME(ret_type, cname, name, fn, ...) \
+ PYBIND11_OVERRIDE_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, fn, __VA_ARGS__)
+#define PYBIND11_OVERLOAD_PURE_NAME(ret_type, cname, name, fn, ...) \
+ PYBIND11_OVERRIDE_PURE_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, fn, __VA_ARGS__);
+#define PYBIND11_OVERLOAD(ret_type, cname, fn, ...) \
+ PYBIND11_OVERRIDE(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), fn, __VA_ARGS__)
+#define PYBIND11_OVERLOAD_PURE(ret_type, cname, fn, ...) \
+ PYBIND11_OVERRIDE_PURE(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), fn, __VA_ARGS__);
+
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+# pragma warning(pop)
+#elif defined(__GNUG__) && !defined(__clang__)
+# pragma GCC diagnostic pop
+#endif
--- /dev/null
+/*
+ pybind11/pytypes.h: Convenience wrapper classes for basic Python types
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "detail/common.h"
+#include "buffer_info.h"
+#include <utility>
+#include <type_traits>
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+
+/* A few forward declarations */
+class handle; class object;
+class str; class iterator;
+class type;
+struct arg; struct arg_v;
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+class args_proxy;
+inline bool isinstance_generic(handle obj, const std::type_info &tp);
+
+// Accessor forward declarations
+template <typename Policy> class accessor;
+namespace accessor_policies {
+ struct obj_attr;
+ struct str_attr;
+ struct generic_item;
+ struct sequence_item;
+ struct list_item;
+ struct tuple_item;
+} // namespace accessor_policies
+using obj_attr_accessor = accessor<accessor_policies::obj_attr>;
+using str_attr_accessor = accessor<accessor_policies::str_attr>;
+using item_accessor = accessor<accessor_policies::generic_item>;
+using sequence_accessor = accessor<accessor_policies::sequence_item>;
+using list_accessor = accessor<accessor_policies::list_item>;
+using tuple_accessor = accessor<accessor_policies::tuple_item>;
+
+/// Tag and check to identify a class which implements the Python object API
+class pyobject_tag { };
+template <typename T> using is_pyobject = std::is_base_of<pyobject_tag, remove_reference_t<T>>;
+
+/** \rst
+ A mixin class which adds common functions to `handle`, `object` and various accessors.
+ The only requirement for `Derived` is to implement ``PyObject *Derived::ptr() const``.
+\endrst */
+template <typename Derived>
+class object_api : public pyobject_tag {
+ const Derived &derived() const { return static_cast<const Derived &>(*this); }
+
+public:
+ /** \rst
+ Return an iterator equivalent to calling ``iter()`` in Python. The object
+ must be a collection which supports the iteration protocol.
+ \endrst */
+ iterator begin() const;
+ /// Return a sentinel which ends iteration.
+ iterator end() const;
+
+ /** \rst
+ Return an internal functor to invoke the object's sequence protocol. Casting
+ the returned ``detail::item_accessor`` instance to a `handle` or `object`
+ subclass causes a corresponding call to ``__getitem__``. Assigning a `handle`
+ or `object` subclass causes a call to ``__setitem__``.
+ \endrst */
+ item_accessor operator[](handle key) const;
+ /// See above (the only difference is that the key is provided as a string literal)
+ item_accessor operator[](const char *key) const;
+
+ /** \rst
+ Return an internal functor to access the object's attributes. Casting the
+ returned ``detail::obj_attr_accessor`` instance to a `handle` or `object`
+ subclass causes a corresponding call to ``getattr``. Assigning a `handle`
+ or `object` subclass causes a call to ``setattr``.
+ \endrst */
+ obj_attr_accessor attr(handle key) const;
+ /// See above (the only difference is that the key is provided as a string literal)
+ str_attr_accessor attr(const char *key) const;
+
+ /** \rst
+ Matches * unpacking in Python, e.g. to unpack arguments out of a ``tuple``
+ or ``list`` for a function call. Applying another * to the result yields
+ ** unpacking, e.g. to unpack a dict as function keyword arguments.
+ See :ref:`calling_python_functions`.
+ \endrst */
+ args_proxy operator*() const;
+
+ /// Check if the given item is contained within this object, i.e. ``item in obj``.
+ template <typename T> bool contains(T &&item) const;
+
+ /** \rst
+ Assuming the Python object is a function or implements the ``__call__``
+ protocol, ``operator()`` invokes the underlying function, passing an
+ arbitrary set of parameters. The result is returned as an `object` and
+ may need to be converted back into a C++ object using `handle::cast()`.
+
+ When some of the arguments cannot be converted to Python objects, the
+ function will throw a `cast_error` exception. When the Python function
+ call fails, a `error_already_set` exception is thrown.
+ \endrst */
+ template <return_value_policy policy = return_value_policy::automatic_reference, typename... Args>
+ object operator()(Args &&...args) const;
+ template <return_value_policy policy = return_value_policy::automatic_reference, typename... Args>
+ PYBIND11_DEPRECATED("call(...) was deprecated in favor of operator()(...)")
+ object call(Args&&... args) const;
+
+ /// Equivalent to ``obj is other`` in Python.
+ bool is(object_api const& other) const { return derived().ptr() == other.derived().ptr(); }
+ /// Equivalent to ``obj is None`` in Python.
+ bool is_none() const { return derived().ptr() == Py_None; }
+ /// Equivalent to obj == other in Python
+ bool equal(object_api const &other) const { return rich_compare(other, Py_EQ); }
+ bool not_equal(object_api const &other) const { return rich_compare(other, Py_NE); }
+ bool operator<(object_api const &other) const { return rich_compare(other, Py_LT); }
+ bool operator<=(object_api const &other) const { return rich_compare(other, Py_LE); }
+ bool operator>(object_api const &other) const { return rich_compare(other, Py_GT); }
+ bool operator>=(object_api const &other) const { return rich_compare(other, Py_GE); }
+
+ object operator-() const;
+ object operator~() const;
+ object operator+(object_api const &other) const;
+ object operator+=(object_api const &other) const;
+ object operator-(object_api const &other) const;
+ object operator-=(object_api const &other) const;
+ object operator*(object_api const &other) const;
+ object operator*=(object_api const &other) const;
+ object operator/(object_api const &other) const;
+ object operator/=(object_api const &other) const;
+ object operator|(object_api const &other) const;
+ object operator|=(object_api const &other) const;
+ object operator&(object_api const &other) const;
+ object operator&=(object_api const &other) const;
+ object operator^(object_api const &other) const;
+ object operator^=(object_api const &other) const;
+ object operator<<(object_api const &other) const;
+ object operator<<=(object_api const &other) const;
+ object operator>>(object_api const &other) const;
+ object operator>>=(object_api const &other) const;
+
+ PYBIND11_DEPRECATED("Use py::str(obj) instead")
+ pybind11::str str() const;
+
+ /// Get or set the object's docstring, i.e. ``obj.__doc__``.
+ str_attr_accessor doc() const;
+
+ /// Return the object's current reference count
+ int ref_count() const { return static_cast<int>(Py_REFCNT(derived().ptr())); }
+
+ // TODO PYBIND11_DEPRECATED("Call py::type::handle_of(h) or py::type::of(h) instead of h.get_type()")
+ handle get_type() const;
+
+private:
+ bool rich_compare(object_api const &other, int value) const;
+};
+
+PYBIND11_NAMESPACE_END(detail)
+
+/** \rst
+ Holds a reference to a Python object (no reference counting)
+
+ The `handle` class is a thin wrapper around an arbitrary Python object (i.e. a
+ ``PyObject *`` in Python's C API). It does not perform any automatic reference
+ counting and merely provides a basic C++ interface to various Python API functions.
+
+ .. seealso::
+ The `object` class inherits from `handle` and adds automatic reference
+ counting features.
+\endrst */
+class handle : public detail::object_api<handle> {
+public:
+ /// The default constructor creates a handle with a ``nullptr``-valued pointer
+ handle() = default;
+ /// Creates a ``handle`` from the given raw Python object pointer
+ handle(PyObject *ptr) : m_ptr(ptr) { } // Allow implicit conversion from PyObject*
+
+ /// Return the underlying ``PyObject *`` pointer
+ PyObject *ptr() const { return m_ptr; }
+ PyObject *&ptr() { return m_ptr; }
+
+ /** \rst
+ Manually increase the reference count of the Python object. Usually, it is
+ preferable to use the `object` class which derives from `handle` and calls
+ this function automatically. Returns a reference to itself.
+ \endrst */
+ const handle& inc_ref() const & { Py_XINCREF(m_ptr); return *this; }
+
+ /** \rst
+ Manually decrease the reference count of the Python object. Usually, it is
+ preferable to use the `object` class which derives from `handle` and calls
+ this function automatically. Returns a reference to itself.
+ \endrst */
+ const handle& dec_ref() const & { Py_XDECREF(m_ptr); return *this; }
+
+ /** \rst
+ Attempt to cast the Python object into the given C++ type. A `cast_error`
+ will be thrown upon failure.
+ \endrst */
+ template <typename T> T cast() const;
+ /// Return ``true`` when the `handle` wraps a valid Python object
+ explicit operator bool() const { return m_ptr != nullptr; }
+ /** \rst
+ Deprecated: Check that the underlying pointers are the same.
+ Equivalent to ``obj1 is obj2`` in Python.
+ \endrst */
+ PYBIND11_DEPRECATED("Use obj1.is(obj2) instead")
+ bool operator==(const handle &h) const { return m_ptr == h.m_ptr; }
+ PYBIND11_DEPRECATED("Use !obj1.is(obj2) instead")
+ bool operator!=(const handle &h) const { return m_ptr != h.m_ptr; }
+ PYBIND11_DEPRECATED("Use handle::operator bool() instead")
+ bool check() const { return m_ptr != nullptr; }
+protected:
+ PyObject *m_ptr = nullptr;
+};
+
+/** \rst
+ Holds a reference to a Python object (with reference counting)
+
+ Like `handle`, the `object` class is a thin wrapper around an arbitrary Python
+ object (i.e. a ``PyObject *`` in Python's C API). In contrast to `handle`, it
+ optionally increases the object's reference count upon construction, and it
+ *always* decreases the reference count when the `object` instance goes out of
+ scope and is destructed. When using `object` instances consistently, it is much
+ easier to get reference counting right at the first attempt.
+\endrst */
+class object : public handle {
+public:
+ object() = default;
+ PYBIND11_DEPRECATED("Use reinterpret_borrow<object>() or reinterpret_steal<object>()")
+ object(handle h, bool is_borrowed) : handle(h) { if (is_borrowed) inc_ref(); }
+ /// Copy constructor; always increases the reference count
+ object(const object &o) : handle(o) { inc_ref(); }
+ /// Move constructor; steals the object from ``other`` and preserves its reference count
+ object(object &&other) noexcept { m_ptr = other.m_ptr; other.m_ptr = nullptr; }
+ /// Destructor; automatically calls `handle::dec_ref()`
+ ~object() { dec_ref(); }
+
+ /** \rst
+ Resets the internal pointer to ``nullptr`` without decreasing the
+ object's reference count. The function returns a raw handle to the original
+ Python object.
+ \endrst */
+ handle release() {
+ PyObject *tmp = m_ptr;
+ m_ptr = nullptr;
+ return handle(tmp);
+ }
+
+ object& operator=(const object &other) {
+ other.inc_ref();
+ dec_ref();
+ m_ptr = other.m_ptr;
+ return *this;
+ }
+
+ object& operator=(object &&other) noexcept {
+ if (this != &other) {
+ handle temp(m_ptr);
+ m_ptr = other.m_ptr;
+ other.m_ptr = nullptr;
+ temp.dec_ref();
+ }
+ return *this;
+ }
+
+ // Calling cast() on an object lvalue just copies (via handle::cast)
+ template <typename T> T cast() const &;
+ // Calling on an object rvalue does a move, if needed and/or possible
+ template <typename T> T cast() &&;
+
+protected:
+ // Tags for choosing constructors from raw PyObject *
+ struct borrowed_t { };
+ struct stolen_t { };
+
+ template <typename T> friend T reinterpret_borrow(handle);
+ template <typename T> friend T reinterpret_steal(handle);
+
+public:
+ // Only accessible from derived classes and the reinterpret_* functions
+ object(handle h, borrowed_t) : handle(h) { inc_ref(); }
+ object(handle h, stolen_t) : handle(h) { }
+};
+
+/** \rst
+ Declare that a `handle` or ``PyObject *`` is a certain type and borrow the reference.
+ The target type ``T`` must be `object` or one of its derived classes. The function
+ doesn't do any conversions or checks. It's up to the user to make sure that the
+ target type is correct.
+
+ .. code-block:: cpp
+
+ PyObject *p = PyList_GetItem(obj, index);
+ py::object o = reinterpret_borrow<py::object>(p);
+ // or
+ py::tuple t = reinterpret_borrow<py::tuple>(p); // <-- `p` must already be a `tuple`
+\endrst */
+template <typename T> T reinterpret_borrow(handle h) { return {h, object::borrowed_t{}}; }
+
+/** \rst
+ Like `reinterpret_borrow`, but steals the reference.
+
+ .. code-block:: cpp
+
+ PyObject *p = PyObject_Str(obj);
+ py::str s = reinterpret_steal<py::str>(p); // <-- `p` must already be a `str`
+\endrst */
+template <typename T> T reinterpret_steal(handle h) { return {h, object::stolen_t{}}; }
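+
+// Illustrative sketch (not part of the upstream header): choosing between the two helpers
+// follows the C API's reference-count conventions. `obj` is a hypothetical handle.
+//
+//     // PyList_GetItem returns a *borrowed* reference -> borrow it
+//     py::object first = py::reinterpret_borrow<py::object>(PyList_GetItem(obj.ptr(), 0));
+//
+//     // PyObject_GetAttrString returns a *new* reference -> steal it, otherwise it leaks
+//     py::object name = py::reinterpret_steal<py::object>(PyObject_GetAttrString(obj.ptr(), "name"));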
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+inline std::string error_string();
+PYBIND11_NAMESPACE_END(detail)
+
+/// Fetch and hold an error which was already set in Python. An instance of this is typically
+/// thrown to propagate python-side errors back through C++ which can either be caught manually or
+/// else falls back to the function dispatcher (which then raises the captured error back to
+/// python).
+class error_already_set : public std::runtime_error {
+public:
+ /// Constructs a new exception from the current Python error indicator, if any. The current
+ /// Python error indicator will be cleared.
+ error_already_set() : std::runtime_error(detail::error_string()) {
+ PyErr_Fetch(&m_type.ptr(), &m_value.ptr(), &m_trace.ptr());
+ }
+
+ error_already_set(const error_already_set &) = default;
+ error_already_set(error_already_set &&) = default;
+
+ inline ~error_already_set() override;
+
+ /// Give the currently-held error back to Python, if any. If there is currently a Python error
+ /// already set it is cleared first. After this call, the current object no longer stores the
+ /// error variables (but the `.what()` string is still available).
+ void restore() { PyErr_Restore(m_type.release().ptr(), m_value.release().ptr(), m_trace.release().ptr()); }
+
+ /// If it is impossible to raise the currently-held error, such as in a destructor, we can write
+ /// it out using Python's unraisable hook (sys.unraisablehook). The error context should be
+ /// some object whose repr() helps identify the location of the error. Python already knows the
+ /// type and value of the error, so there is no need to repeat that. For example, __func__ could
+ /// be helpful. After this call, the current object no longer stores the error variables,
+ /// and neither does Python.
+ void discard_as_unraisable(object err_context) {
+ restore();
+ PyErr_WriteUnraisable(err_context.ptr());
+ }
+ void discard_as_unraisable(const char *err_context) {
+ discard_as_unraisable(reinterpret_steal<object>(PYBIND11_FROM_STRING(err_context)));
+ }
+
+ // Does nothing; provided for backwards compatibility.
+ PYBIND11_DEPRECATED("Use of error_already_set.clear() is deprecated")
+ void clear() {}
+
+ /// Check if the currently trapped error type matches the given Python exception class (or a
+ /// subclass thereof). May also be passed a tuple to search for any exception class matches in
+ /// the given tuple.
+ bool matches(handle exc) const { return PyErr_GivenExceptionMatches(m_type.ptr(), exc.ptr()); }
+
+ const object& type() const { return m_type; }
+ const object& value() const { return m_value; }
+ const object& trace() const { return m_trace; }
+
+private:
+ object m_type, m_value, m_trace;
+};
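+
+// Illustrative sketch (not part of the upstream header): typical handling of a Python error
+// raised while calling back into Python. `callback` is a hypothetical py::function.
+//
+//     try {
+//         callback();
+//     } catch (py::error_already_set &e) {
+//         if (e.matches(PyExc_StopIteration)) {
+//             // e.g. treat an exhausted iterator as a normal end condition
+//         } else {
+//             throw; // let the pybind11 dispatcher translate the error back to Python
+//         }
+//     }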
+
+/** \defgroup python_builtins _
+ Unless stated otherwise, the following C++ functions behave the same
+ as their Python counterparts.
+ */
+
+/** \ingroup python_builtins
+ \rst
+ Return true if ``obj`` is an instance of ``T``. Type ``T`` must be a subclass of
+ `object` or a class which was exposed to Python as ``py::class_<T>``.
+\endrst */
+template <typename T, detail::enable_if_t<std::is_base_of<object, T>::value, int> = 0>
+bool isinstance(handle obj) { return T::check_(obj); }
+
+template <typename T, detail::enable_if_t<!std::is_base_of<object, T>::value, int> = 0>
+bool isinstance(handle obj) { return detail::isinstance_generic(obj, typeid(T)); }
+
+template <> inline bool isinstance<handle>(handle) = delete;
+template <> inline bool isinstance<object>(handle obj) { return obj.ptr() != nullptr; }
+
+/// \ingroup python_builtins
+/// Return true if ``obj`` is an instance of the ``type``.
+inline bool isinstance(handle obj, handle type) {
+ const auto result = PyObject_IsInstance(obj.ptr(), type.ptr());
+ if (result == -1)
+ throw error_already_set();
+ return result != 0;
+}
+
+/// \addtogroup python_builtins
+/// @{
+inline bool hasattr(handle obj, handle name) {
+ return PyObject_HasAttr(obj.ptr(), name.ptr()) == 1;
+}
+
+inline bool hasattr(handle obj, const char *name) {
+ return PyObject_HasAttrString(obj.ptr(), name) == 1;
+}
+
+inline void delattr(handle obj, handle name) {
+ if (PyObject_DelAttr(obj.ptr(), name.ptr()) != 0) { throw error_already_set(); }
+}
+
+inline void delattr(handle obj, const char *name) {
+ if (PyObject_DelAttrString(obj.ptr(), name) != 0) { throw error_already_set(); }
+}
+
+inline object getattr(handle obj, handle name) {
+ PyObject *result = PyObject_GetAttr(obj.ptr(), name.ptr());
+ if (!result) { throw error_already_set(); }
+ return reinterpret_steal<object>(result);
+}
+
+inline object getattr(handle obj, const char *name) {
+ PyObject *result = PyObject_GetAttrString(obj.ptr(), name);
+ if (!result) { throw error_already_set(); }
+ return reinterpret_steal<object>(result);
+}
+
+inline object getattr(handle obj, handle name, handle default_) {
+ if (PyObject *result = PyObject_GetAttr(obj.ptr(), name.ptr())) {
+ return reinterpret_steal<object>(result);
+ } else {
+ PyErr_Clear();
+ return reinterpret_borrow<object>(default_);
+ }
+}
+
+inline object getattr(handle obj, const char *name, handle default_) {
+ if (PyObject *result = PyObject_GetAttrString(obj.ptr(), name)) {
+ return reinterpret_steal<object>(result);
+ } else {
+ PyErr_Clear();
+ return reinterpret_borrow<object>(default_);
+ }
+}
+
+inline void setattr(handle obj, handle name, handle value) {
+ if (PyObject_SetAttr(obj.ptr(), name.ptr(), value.ptr()) != 0) { throw error_already_set(); }
+}
+
+inline void setattr(handle obj, const char *name, handle value) {
+ if (PyObject_SetAttrString(obj.ptr(), name, value.ptr()) != 0) { throw error_already_set(); }
+}
+
+inline ssize_t hash(handle obj) {
+ auto h = PyObject_Hash(obj.ptr());
+ if (h == -1) { throw error_already_set(); }
+ return h;
+}
+
+/// @} python_builtins
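+
+// Illustrative sketch (not part of the upstream header): the free functions above mirror the
+// Python builtins of the same name. `obj` is a hypothetical py::object; py::print and py::int_
+// come from other parts of pybind11.
+//
+//     if (py::hasattr(obj, "name"))
+//         py::print(py::getattr(obj, "name"));
+//     py::setattr(obj, "counter", py::int_(0));
+//     py::object missing = py::getattr(obj, "missing", py::none()); // default instead of throwing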
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+inline handle get_function(handle value) {
+ if (value) {
+#if PY_MAJOR_VERSION >= 3
+ if (PyInstanceMethod_Check(value.ptr()))
+ value = PyInstanceMethod_GET_FUNCTION(value.ptr());
+ else
+#endif
+ if (PyMethod_Check(value.ptr()))
+ value = PyMethod_GET_FUNCTION(value.ptr());
+ }
+ return value;
+}
+
+// Helper aliases/functions to support implicit casting of values given to Python accessors/methods.
+// When given a pyobject, this simply returns the pyobject as-is; for any other C++ type, the value goes
+// through pybind11::cast(obj) to convert it to an `object`.
+template <typename T, enable_if_t<is_pyobject<T>::value, int> = 0>
+auto object_or_cast(T &&o) -> decltype(std::forward<T>(o)) { return std::forward<T>(o); }
+// The following casting version is implemented in cast.h:
+template <typename T, enable_if_t<!is_pyobject<T>::value, int> = 0>
+object object_or_cast(T &&o);
+// Match a PyObject*, which we want to convert directly to handle via its converting constructor
+inline handle object_or_cast(PyObject *ptr) { return ptr; }
+
+template <typename Policy>
+class accessor : public object_api<accessor<Policy>> {
+ using key_type = typename Policy::key_type;
+
+public:
+ accessor(handle obj, key_type key) : obj(obj), key(std::move(key)) { }
+ accessor(const accessor &) = default;
+ accessor(accessor &&) = default;
+
+ // accessor overload required to override default assignment operator (templates are not allowed
+ // to replace default compiler-generated assignments).
+ void operator=(const accessor &a) && { std::move(*this).operator=(handle(a)); }
+ void operator=(const accessor &a) & { operator=(handle(a)); }
+
+ template <typename T> void operator=(T &&value) && {
+ Policy::set(obj, key, object_or_cast(std::forward<T>(value)));
+ }
+ template <typename T> void operator=(T &&value) & {
+ get_cache() = reinterpret_borrow<object>(object_or_cast(std::forward<T>(value)));
+ }
+
+ template <typename T = Policy>
+ PYBIND11_DEPRECATED("Use of obj.attr(...) as bool is deprecated in favor of pybind11::hasattr(obj, ...)")
+ explicit operator enable_if_t<std::is_same<T, accessor_policies::str_attr>::value ||
+ std::is_same<T, accessor_policies::obj_attr>::value, bool>() const {
+ return hasattr(obj, key);
+ }
+ template <typename T = Policy>
+ PYBIND11_DEPRECATED("Use of obj[key] as bool is deprecated in favor of obj.contains(key)")
+ explicit operator enable_if_t<std::is_same<T, accessor_policies::generic_item>::value, bool>() const {
+ return obj.contains(key);
+ }
+
+ operator object() const { return get_cache(); }
+ PyObject *ptr() const { return get_cache().ptr(); }
+ template <typename T> T cast() const { return get_cache().template cast<T>(); }
+
+private:
+ object &get_cache() const {
+ if (!cache) { cache = Policy::get(obj, key); }
+ return cache;
+ }
+
+private:
+ handle obj;
+ key_type key;
+ mutable object cache;
+};
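+
+// Illustrative sketch (not part of the upstream header): the accessor is what makes the chained
+// obj.attr(...) / obj[...] syntax work: reads go through Policy::get, assignments through
+// Policy::set. `obj` is a hypothetical py::object; cast() is defined in pybind11/cast.h.
+//
+//     obj.attr("answer") = 42;          // str_attr policy -> setattr(obj, "answer", ...)
+//     int n = obj["count"].cast<int>(); // generic_item policy -> PyObject_GetItem + cast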
+
+PYBIND11_NAMESPACE_BEGIN(accessor_policies)
+struct obj_attr {
+ using key_type = object;
+ static object get(handle obj, handle key) { return getattr(obj, key); }
+ static void set(handle obj, handle key, handle val) { setattr(obj, key, val); }
+};
+
+struct str_attr {
+ using key_type = const char *;
+ static object get(handle obj, const char *key) { return getattr(obj, key); }
+ static void set(handle obj, const char *key, handle val) { setattr(obj, key, val); }
+};
+
+struct generic_item {
+ using key_type = object;
+
+ static object get(handle obj, handle key) {
+ PyObject *result = PyObject_GetItem(obj.ptr(), key.ptr());
+ if (!result) { throw error_already_set(); }
+ return reinterpret_steal<object>(result);
+ }
+
+ static void set(handle obj, handle key, handle val) {
+ if (PyObject_SetItem(obj.ptr(), key.ptr(), val.ptr()) != 0) { throw error_already_set(); }
+ }
+};
+
+struct sequence_item {
+ using key_type = size_t;
+
+ static object get(handle obj, size_t index) {
+ PyObject *result = PySequence_GetItem(obj.ptr(), static_cast<ssize_t>(index));
+ if (!result) { throw error_already_set(); }
+ return reinterpret_steal<object>(result);
+ }
+
+ static void set(handle obj, size_t index, handle val) {
+ // PySequence_SetItem does not steal a reference to 'val'
+ if (PySequence_SetItem(obj.ptr(), static_cast<ssize_t>(index), val.ptr()) != 0) {
+ throw error_already_set();
+ }
+ }
+};
+
+struct list_item {
+ using key_type = size_t;
+
+ static object get(handle obj, size_t index) {
+ PyObject *result = PyList_GetItem(obj.ptr(), static_cast<ssize_t>(index));
+ if (!result) { throw error_already_set(); }
+ return reinterpret_borrow<object>(result);
+ }
+
+ static void set(handle obj, size_t index, handle val) {
+ // PyList_SetItem steals a reference to 'val'
+ if (PyList_SetItem(obj.ptr(), static_cast<ssize_t>(index), val.inc_ref().ptr()) != 0) {
+ throw error_already_set();
+ }
+ }
+};
+
+struct tuple_item {
+ using key_type = size_t;
+
+ static object get(handle obj, size_t index) {
+ PyObject *result = PyTuple_GetItem(obj.ptr(), static_cast<ssize_t>(index));
+ if (!result) { throw error_already_set(); }
+ return reinterpret_borrow<object>(result);
+ }
+
+ static void set(handle obj, size_t index, handle val) {
+ // PyTuple_SetItem steals a reference to 'val'
+ if (PyTuple_SetItem(obj.ptr(), static_cast<ssize_t>(index), val.inc_ref().ptr()) != 0) {
+ throw error_already_set();
+ }
+ }
+};
+PYBIND11_NAMESPACE_END(accessor_policies)
+
+/// STL iterator template used for tuple, list, sequence and dict
+template <typename Policy>
+class generic_iterator : public Policy {
+ using It = generic_iterator;
+
+public:
+ using difference_type = ssize_t;
+ using iterator_category = typename Policy::iterator_category;
+ using value_type = typename Policy::value_type;
+ using reference = typename Policy::reference;
+ using pointer = typename Policy::pointer;
+
+ generic_iterator() = default;
+ generic_iterator(handle seq, ssize_t index) : Policy(seq, index) { }
+
+ reference operator*() const { return Policy::dereference(); }
+ reference operator[](difference_type n) const { return *(*this + n); }
+ pointer operator->() const { return **this; }
+
+ It &operator++() { Policy::increment(); return *this; }
+ It operator++(int) { auto copy = *this; Policy::increment(); return copy; }
+ It &operator--() { Policy::decrement(); return *this; }
+ It operator--(int) { auto copy = *this; Policy::decrement(); return copy; }
+ It &operator+=(difference_type n) { Policy::advance(n); return *this; }
+ It &operator-=(difference_type n) { Policy::advance(-n); return *this; }
+
+ friend It operator+(const It &a, difference_type n) { auto copy = a; return copy += n; }
+ friend It operator+(difference_type n, const It &b) { return b + n; }
+ friend It operator-(const It &a, difference_type n) { auto copy = a; return copy -= n; }
+ friend difference_type operator-(const It &a, const It &b) { return a.distance_to(b); }
+
+ friend bool operator==(const It &a, const It &b) { return a.equal(b); }
+ friend bool operator!=(const It &a, const It &b) { return !(a == b); }
+ friend bool operator< (const It &a, const It &b) { return b - a > 0; }
+ friend bool operator> (const It &a, const It &b) { return b < a; }
+ friend bool operator>=(const It &a, const It &b) { return !(a < b); }
+ friend bool operator<=(const It &a, const It &b) { return !(a > b); }
+};
+
+PYBIND11_NAMESPACE_BEGIN(iterator_policies)
+/// Quick proxy class needed to implement ``operator->`` for iterators which can't return pointers
+template <typename T>
+struct arrow_proxy {
+ T value;
+
+ arrow_proxy(T &&value) : value(std::move(value)) { }
+ T *operator->() const { return &value; }
+};
+
+/// Lightweight iterator policy using just a simple pointer: see ``PySequence_Fast_ITEMS``
+class sequence_fast_readonly {
+protected:
+ using iterator_category = std::random_access_iterator_tag;
+ using value_type = handle;
+ using reference = const handle;
+ using pointer = arrow_proxy<const handle>;
+
+ sequence_fast_readonly(handle obj, ssize_t n) : ptr(PySequence_Fast_ITEMS(obj.ptr()) + n) { }
+
+ reference dereference() const { return *ptr; }
+ void increment() { ++ptr; }
+ void decrement() { --ptr; }
+ void advance(ssize_t n) { ptr += n; }
+ bool equal(const sequence_fast_readonly &b) const { return ptr == b.ptr; }
+ ssize_t distance_to(const sequence_fast_readonly &b) const { return ptr - b.ptr; }
+
+private:
+ PyObject **ptr;
+};
+
+/// Full read and write access using the sequence protocol: see ``detail::sequence_accessor``
+class sequence_slow_readwrite {
+protected:
+ using iterator_category = std::random_access_iterator_tag;
+ using value_type = object;
+ using reference = sequence_accessor;
+ using pointer = arrow_proxy<const sequence_accessor>;
+
+ sequence_slow_readwrite(handle obj, ssize_t index) : obj(obj), index(index) { }
+
+ reference dereference() const { return {obj, static_cast<size_t>(index)}; }
+ void increment() { ++index; }
+ void decrement() { --index; }
+ void advance(ssize_t n) { index += n; }
+ bool equal(const sequence_slow_readwrite &b) const { return index == b.index; }
+ ssize_t distance_to(const sequence_slow_readwrite &b) const { return index - b.index; }
+
+private:
+ handle obj;
+ ssize_t index;
+};
+
+/// Python's dictionary protocol permits this to be a forward iterator
+class dict_readonly {
+protected:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = std::pair<handle, handle>;
+ using reference = const value_type;
+ using pointer = arrow_proxy<const value_type>;
+
+ dict_readonly() = default;
+ dict_readonly(handle obj, ssize_t pos) : obj(obj), pos(pos) { increment(); }
+
+ reference dereference() const { return {key, value}; }
+ void increment() { if (!PyDict_Next(obj.ptr(), &pos, &key, &value)) { pos = -1; } }
+ bool equal(const dict_readonly &b) const { return pos == b.pos; }
+
+private:
+ handle obj;
+ PyObject *key = nullptr, *value = nullptr;
+ ssize_t pos = -1;
+};
+PYBIND11_NAMESPACE_END(iterator_policies)
+
+#if !defined(PYPY_VERSION)
+using tuple_iterator = generic_iterator<iterator_policies::sequence_fast_readonly>;
+using list_iterator = generic_iterator<iterator_policies::sequence_fast_readonly>;
+#else
+using tuple_iterator = generic_iterator<iterator_policies::sequence_slow_readwrite>;
+using list_iterator = generic_iterator<iterator_policies::sequence_slow_readwrite>;
+#endif
+
+using sequence_iterator = generic_iterator<iterator_policies::sequence_slow_readwrite>;
+using dict_iterator = generic_iterator<iterator_policies::dict_readonly>;
+
+inline bool PyIterable_Check(PyObject *obj) {
+ PyObject *iter = PyObject_GetIter(obj);
+ if (iter) {
+ Py_DECREF(iter);
+ return true;
+ } else {
+ PyErr_Clear();
+ return false;
+ }
+}
+
+inline bool PyNone_Check(PyObject *o) { return o == Py_None; }
+inline bool PyEllipsis_Check(PyObject *o) { return o == Py_Ellipsis; }
+
+inline bool PyUnicode_Check_Permissive(PyObject *o) { return PyUnicode_Check(o) || PYBIND11_BYTES_CHECK(o); }
+
+inline bool PyStaticMethod_Check(PyObject *o) { return o->ob_type == &PyStaticMethod_Type; }
+
+class kwargs_proxy : public handle {
+public:
+ explicit kwargs_proxy(handle h) : handle(h) { }
+};
+
+class args_proxy : public handle {
+public:
+ explicit args_proxy(handle h) : handle(h) { }
+ kwargs_proxy operator*() const { return kwargs_proxy(*this); }
+};
+
+/// Python argument categories (using PEP 448 terms)
+template <typename T> using is_keyword = std::is_base_of<arg, T>;
+template <typename T> using is_s_unpacking = std::is_same<args_proxy, T>; // * unpacking
+template <typename T> using is_ds_unpacking = std::is_same<kwargs_proxy, T>; // ** unpacking
+template <typename T> using is_positional = satisfies_none_of<T,
+ is_keyword, is_s_unpacking, is_ds_unpacking
+>;
+template <typename T> using is_keyword_or_ds = satisfies_any_of<T, is_keyword, is_ds_unpacking>;
+
+// Call argument collector forward declarations
+template <return_value_policy policy = return_value_policy::automatic_reference>
+class simple_collector;
+template <return_value_policy policy = return_value_policy::automatic_reference>
+class unpacking_collector;
+
+PYBIND11_NAMESPACE_END(detail)
+
+// TODO: After the deprecated constructors are removed, this macro can be simplified by
+// inheriting ctors: `using Parent::Parent`. It's not an option right now because
+// the `using` statement triggers the parent deprecation warning even if the ctor
+// isn't actually used.
+#define PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \
+ public: \
+ PYBIND11_DEPRECATED("Use reinterpret_borrow<"#Name">() or reinterpret_steal<"#Name">()") \
+ Name(handle h, bool is_borrowed) : Parent(is_borrowed ? Parent(h, borrowed_t{}) : Parent(h, stolen_t{})) { } \
+ Name(handle h, borrowed_t) : Parent(h, borrowed_t{}) { } \
+ Name(handle h, stolen_t) : Parent(h, stolen_t{}) { } \
+ PYBIND11_DEPRECATED("Use py::isinstance<py::python_type>(obj) instead") \
+ bool check() const { return m_ptr != nullptr && (bool) CheckFun(m_ptr); } \
+ static bool check_(handle h) { return h.ptr() != nullptr && CheckFun(h.ptr()); } \
+ template <typename Policy_> \
+ Name(const ::pybind11::detail::accessor<Policy_> &a) : Name(object(a)) { }
+
+#define PYBIND11_OBJECT_CVT(Name, Parent, CheckFun, ConvertFun) \
+ PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \
+ /* This is deliberately not 'explicit' to allow implicit conversion from object: */ \
+ Name(const object &o) \
+ : Parent(check_(o) ? o.inc_ref().ptr() : ConvertFun(o.ptr()), stolen_t{}) \
+ { if (!m_ptr) throw error_already_set(); } \
+ Name(object &&o) \
+ : Parent(check_(o) ? o.release().ptr() : ConvertFun(o.ptr()), stolen_t{}) \
+ { if (!m_ptr) throw error_already_set(); }
+
+#define PYBIND11_OBJECT_CHECK_FAILED(Name, o) \
+ ::pybind11::type_error("Object of type '" + \
+ ::pybind11::detail::get_fully_qualified_tp_name(Py_TYPE(o.ptr())) + \
+ "' is not an instance of '" #Name "'")
+
+#define PYBIND11_OBJECT(Name, Parent, CheckFun) \
+ PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun) \
+ /* This is deliberately not 'explicit' to allow implicit conversion from object: */ \
+ Name(const object &o) : Parent(o) \
+ { if (o && !check_(o)) throw PYBIND11_OBJECT_CHECK_FAILED(Name, o); } \
+ Name(object &&o) : Parent(std::move(o)) \
+ { if (o && !check_(o)) throw PYBIND11_OBJECT_CHECK_FAILED(Name, o); }
+
+#define PYBIND11_OBJECT_DEFAULT(Name, Parent, CheckFun) \
+ PYBIND11_OBJECT(Name, Parent, CheckFun) \
+ Name() : Parent() { }
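+
+// Illustrative sketch (not part of the upstream header): a user-defined wrapper type can reuse
+// the macros above. `frozenset` here is hypothetical and not provided by pybind11.
+//
+//     class frozenset : public object {
+//     public:
+//         PYBIND11_OBJECT_CVT(frozenset, object, PyFrozenSet_Check, PyFrozenSet_New)
+//         frozenset() : object(PyFrozenSet_New(nullptr), stolen_t{}) {
+//             if (!m_ptr) pybind11_fail("Could not allocate frozenset object!");
+//         }
+//     };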
+
+/// \addtogroup pytypes
+/// @{
+
+/** \rst
+ Wraps a Python iterator so that it can also be used as a C++ input iterator
+
+ Caveat: copying an iterator does not (and cannot) clone the internal
+ state of the Python iterable. This also applies to the post-increment
+ operator. This iterator should only be used to retrieve the current
+ value using ``operator*()``.
+\endrst */
+class iterator : public object {
+public:
+ using iterator_category = std::input_iterator_tag;
+ using difference_type = ssize_t;
+ using value_type = handle;
+ using reference = const handle;
+ using pointer = const handle *;
+
+ PYBIND11_OBJECT_DEFAULT(iterator, object, PyIter_Check)
+
+ iterator& operator++() {
+ advance();
+ return *this;
+ }
+
+ iterator operator++(int) {
+ auto rv = *this;
+ advance();
+ return rv;
+ }
+
+ reference operator*() const {
+ if (m_ptr && !value.ptr()) {
+ auto& self = const_cast<iterator &>(*this);
+ self.advance();
+ }
+ return value;
+ }
+
+ pointer operator->() const { operator*(); return &value; }
+
+ /** \rst
+ The value which marks the end of the iteration. ``it == iterator::sentinel()``
+ is equivalent to catching ``StopIteration`` in Python.
+
+ .. code-block:: cpp
+
+ void foo(py::iterator it) {
+ while (it != py::iterator::sentinel()) {
+ // use `*it`
+ ++it;
+ }
+ }
+ \endrst */
+ static iterator sentinel() { return {}; }
+
+ friend bool operator==(const iterator &a, const iterator &b) { return a->ptr() == b->ptr(); }
+ friend bool operator!=(const iterator &a, const iterator &b) { return a->ptr() != b->ptr(); }
+
+private:
+ void advance() {
+ value = reinterpret_steal<object>(PyIter_Next(m_ptr));
+ if (PyErr_Occurred()) { throw error_already_set(); }
+ }
+
+private:
+ object value = {};
+};
+
+
+
+class type : public object {
+public:
+ PYBIND11_OBJECT(type, object, PyType_Check)
+
+ /// Return a type handle from a handle or an object
+ static handle handle_of(handle h) { return handle((PyObject*) Py_TYPE(h.ptr())); }
+
+ /// Return a type object from a handle or an object
+ static type of(handle h) { return type(type::handle_of(h), borrowed_t{}); }
+
+ // Defined in pybind11/cast.h
+ /// Convert C++ type to handle if previously registered. Does not convert
+ /// standard types, like int, float, etc., yet.
+ /// See https://github.com/pybind/pybind11/issues/2486
+ template<typename T>
+ static handle handle_of();
+
+ /// Convert C++ type to type if previously registered. Does not convert
+ /// standard types, like int, float, etc., yet.
+ /// See https://github.com/pybind/pybind11/issues/2486
+ template<typename T>
+ static type of() {return type(type::handle_of<T>(), borrowed_t{}); }
+};
+
+class iterable : public object {
+public:
+ PYBIND11_OBJECT_DEFAULT(iterable, object, detail::PyIterable_Check)
+};
+
+class bytes;
+
+class str : public object {
+public:
+ PYBIND11_OBJECT_CVT(str, object, detail::PyUnicode_Check_Permissive, raw_str)
+
+ str(const char *c, size_t n)
+ : object(PyUnicode_FromStringAndSize(c, (ssize_t) n), stolen_t{}) {
+ if (!m_ptr) pybind11_fail("Could not allocate string object!");
+ }
+
+ // 'explicit' is explicitly omitted from the following constructors to allow implicit conversion to py::str from C++ string-like objects
+ str(const char *c = "")
+ : object(PyUnicode_FromString(c), stolen_t{}) {
+ if (!m_ptr) pybind11_fail("Could not allocate string object!");
+ }
+
+ str(const std::string &s) : str(s.data(), s.size()) { }
+
+ explicit str(const bytes &b);
+
+ /** \rst
+ Return a string representation of the object. This is analogous to
+ the ``str()`` function in Python.
+ \endrst */
+ explicit str(handle h) : object(raw_str(h.ptr()), stolen_t{}) { if (!m_ptr) throw error_already_set(); }
+
+ operator std::string() const {
+ object temp = *this;
+ if (PyUnicode_Check(m_ptr)) {
+ temp = reinterpret_steal<object>(PyUnicode_AsUTF8String(m_ptr));
+ if (!temp)
+ pybind11_fail("Unable to extract string contents! (encoding issue)");
+ }
+ char *buffer;
+ ssize_t length;
+ if (PYBIND11_BYTES_AS_STRING_AND_SIZE(temp.ptr(), &buffer, &length))
+ pybind11_fail("Unable to extract string contents! (invalid type)");
+ return std::string(buffer, (size_t) length);
+ }
+
+ template <typename... Args>
+ str format(Args &&...args) const {
+ return attr("format")(std::forward<Args>(args)...);
+ }
+
+private:
+ /// Return string representation -- always returns a new reference, even if already a str
+ static PyObject *raw_str(PyObject *op) {
+ PyObject *str_value = PyObject_Str(op);
+#if PY_MAJOR_VERSION < 3
+ if (!str_value) throw error_already_set();
+ PyObject *unicode = PyUnicode_FromEncodedObject(str_value, "utf-8", nullptr);
+ Py_XDECREF(str_value); str_value = unicode;
+#endif
+ return str_value;
+ }
+};
+/// @} pytypes
+
+inline namespace literals {
+/** \rst
+ String literal version of `str`
+ \endrst */
+inline str operator"" _s(const char *s, size_t size) { return {s, size}; }
+} // namespace literals
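+
+// Illustrative sketch (not part of the upstream header): common py::str usage, including the
+// ``_s`` literal defined just above.
+//
+//     using namespace pybind11::literals;
+//     py::str greeting = "Hello, {}!"_s;
+//     py::str msg = greeting.format("world"); // calls str.format on the Python side
+//     std::string utf8 = msg;                 // implicit conversion decodes to UTF-8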
+
+/// \addtogroup pytypes
+/// @{
+class bytes : public object {
+public:
+ PYBIND11_OBJECT(bytes, object, PYBIND11_BYTES_CHECK)
+
+ // Allow implicit conversion:
+ bytes(const char *c = "")
+ : object(PYBIND11_BYTES_FROM_STRING(c), stolen_t{}) {
+ if (!m_ptr) pybind11_fail("Could not allocate bytes object!");
+ }
+
+ bytes(const char *c, size_t n)
+ : object(PYBIND11_BYTES_FROM_STRING_AND_SIZE(c, (ssize_t) n), stolen_t{}) {
+ if (!m_ptr) pybind11_fail("Could not allocate bytes object!");
+ }
+
+ // Allow implicit conversion:
+ bytes(const std::string &s) : bytes(s.data(), s.size()) { }
+
+ explicit bytes(const pybind11::str &s);
+
+ operator std::string() const {
+ char *buffer;
+ ssize_t length;
+ if (PYBIND11_BYTES_AS_STRING_AND_SIZE(m_ptr, &buffer, &length))
+ pybind11_fail("Unable to extract bytes contents!");
+ return std::string(buffer, (size_t) length);
+ }
+};
+// Note: breathe >= 4.17.0 will fail to build docs if the below two constructors
+// are included in the doxygen group; close here and reopen after as a workaround
+/// @} pytypes
+
+inline bytes::bytes(const pybind11::str &s) {
+ object temp = s;
+ if (PyUnicode_Check(s.ptr())) {
+ temp = reinterpret_steal<object>(PyUnicode_AsUTF8String(s.ptr()));
+ if (!temp)
+ pybind11_fail("Unable to extract string contents! (encoding issue)");
+ }
+ char *buffer;
+ ssize_t length;
+ if (PYBIND11_BYTES_AS_STRING_AND_SIZE(temp.ptr(), &buffer, &length))
+ pybind11_fail("Unable to extract string contents! (invalid type)");
+ auto obj = reinterpret_steal<object>(PYBIND11_BYTES_FROM_STRING_AND_SIZE(buffer, length));
+ if (!obj)
+ pybind11_fail("Could not allocate bytes object!");
+ m_ptr = obj.release().ptr();
+}
+
+inline str::str(const bytes& b) {
+ char *buffer;
+ ssize_t length;
+ if (PYBIND11_BYTES_AS_STRING_AND_SIZE(b.ptr(), &buffer, &length))
+ pybind11_fail("Unable to extract bytes contents!");
+ auto obj = reinterpret_steal<object>(PyUnicode_FromStringAndSize(buffer, (ssize_t) length));
+ if (!obj)
+ pybind11_fail("Could not allocate string object!");
+ m_ptr = obj.release().ptr();
+}
+
+/// \addtogroup pytypes
+/// @{
+class none : public object {
+public:
+ PYBIND11_OBJECT(none, object, detail::PyNone_Check)
+ none() : object(Py_None, borrowed_t{}) { }
+};
+
+class ellipsis : public object {
+public:
+ PYBIND11_OBJECT(ellipsis, object, detail::PyEllipsis_Check)
+ ellipsis() : object(Py_Ellipsis, borrowed_t{}) { }
+};
+
+class bool_ : public object {
+public:
+ PYBIND11_OBJECT_CVT(bool_, object, PyBool_Check, raw_bool)
+ bool_() : object(Py_False, borrowed_t{}) { }
+ // Allow implicit conversion from and to `bool`:
+ bool_(bool value) : object(value ? Py_True : Py_False, borrowed_t{}) { }
+ operator bool() const { return m_ptr && PyLong_AsLong(m_ptr) != 0; }
+
+private:
+ /// Return the truth value of an object -- always returns a new reference
+ static PyObject *raw_bool(PyObject *op) {
+ const auto value = PyObject_IsTrue(op);
+ if (value == -1) return nullptr;
+ return handle(value ? Py_True : Py_False).inc_ref().ptr();
+ }
+};
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+// Converts a value to the given unsigned type. If an error occurs, you get back (Unsigned) -1;
+// otherwise you get back the unsigned long or unsigned long long value cast to (Unsigned).
+// (The distinction is critically important when casting a returned -1 error value to some other
+// unsigned type: (A)-1 != (B)-1 when A and B are unsigned types of different sizes).
+template <typename Unsigned>
+Unsigned as_unsigned(PyObject *o) {
+ if (sizeof(Unsigned) <= sizeof(unsigned long)
+#if PY_VERSION_HEX < 0x03000000
+ || PyInt_Check(o)
+#endif
+ ) {
+ unsigned long v = PyLong_AsUnsignedLong(o);
+ return v == (unsigned long) -1 && PyErr_Occurred() ? (Unsigned) -1 : (Unsigned) v;
+ }
+ else {
+ unsigned long long v = PyLong_AsUnsignedLongLong(o);
+ return v == (unsigned long long) -1 && PyErr_Occurred() ? (Unsigned) -1 : (Unsigned) v;
+ }
+}
+PYBIND11_NAMESPACE_END(detail)
+
+class int_ : public object {
+public:
+ PYBIND11_OBJECT_CVT(int_, object, PYBIND11_LONG_CHECK, PyNumber_Long)
+ int_() : object(PyLong_FromLong(0), stolen_t{}) { }
+ // Allow implicit conversion from C++ integral types:
+ template <typename T,
+ detail::enable_if_t<std::is_integral<T>::value, int> = 0>
+ int_(T value) {
+ if (sizeof(T) <= sizeof(long)) {
+ if (std::is_signed<T>::value)
+ m_ptr = PyLong_FromLong((long) value);
+ else
+ m_ptr = PyLong_FromUnsignedLong((unsigned long) value);
+ } else {
+ if (std::is_signed<T>::value)
+ m_ptr = PyLong_FromLongLong((long long) value);
+ else
+ m_ptr = PyLong_FromUnsignedLongLong((unsigned long long) value);
+ }
+ if (!m_ptr) pybind11_fail("Could not allocate int object!");
+ }
+
+ template <typename T,
+ detail::enable_if_t<std::is_integral<T>::value, int> = 0>
+ operator T() const {
+ return std::is_unsigned<T>::value
+ ? detail::as_unsigned<T>(m_ptr)
+ : sizeof(T) <= sizeof(long)
+ ? (T) PyLong_AsLong(m_ptr)
+ : (T) PYBIND11_LONG_AS_LONGLONG(m_ptr);
+ }
+};
+
+class float_ : public object {
+public:
+ PYBIND11_OBJECT_CVT(float_, object, PyFloat_Check, PyNumber_Float)
+ // Allow implicit conversion from float/double:
+ float_(float value) : object(PyFloat_FromDouble((double) value), stolen_t{}) {
+ if (!m_ptr) pybind11_fail("Could not allocate float object!");
+ }
+ float_(double value = .0) : object(PyFloat_FromDouble((double) value), stolen_t{}) {
+ if (!m_ptr) pybind11_fail("Could not allocate float object!");
+ }
+ operator float() const { return (float) PyFloat_AsDouble(m_ptr); }
+ operator double() const { return (double) PyFloat_AsDouble(m_ptr); }
+};
+
+class weakref : public object {
+public:
+ PYBIND11_OBJECT_DEFAULT(weakref, object, PyWeakref_Check)
+ explicit weakref(handle obj, handle callback = {})
+ : object(PyWeakref_NewRef(obj.ptr(), callback.ptr()), stolen_t{}) {
+ if (!m_ptr) pybind11_fail("Could not allocate weak reference!");
+ }
+};
+
+class slice : public object {
+public:
+ PYBIND11_OBJECT_DEFAULT(slice, object, PySlice_Check)
+ slice(ssize_t start_, ssize_t stop_, ssize_t step_) {
+ int_ start(start_), stop(stop_), step(step_);
+ m_ptr = PySlice_New(start.ptr(), stop.ptr(), step.ptr());
+ if (!m_ptr) pybind11_fail("Could not allocate slice object!");
+ }
+ bool compute(size_t length, size_t *start, size_t *stop, size_t *step,
+ size_t *slicelength) const {
+ return PySlice_GetIndicesEx((PYBIND11_SLICE_OBJECT *) m_ptr,
+ (ssize_t) length, (ssize_t *) start,
+ (ssize_t *) stop, (ssize_t *) step,
+ (ssize_t *) slicelength) == 0;
+ }
+ bool compute(ssize_t length, ssize_t *start, ssize_t *stop, ssize_t *step,
+ ssize_t *slicelength) const {
+ return PySlice_GetIndicesEx((PYBIND11_SLICE_OBJECT *) m_ptr,
+ length, start,
+ stop, step,
+ slicelength) == 0;
+ }
+};
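+
+// Illustrative sketch (not part of the upstream header): applying a py::slice to a container
+// of known length. `vec` is a hypothetical std::vector.
+//
+//     py::slice s(0, 10, 2);
+//     size_t start = 0, stop = 0, step = 0, slicelength = 0;
+//     if (!s.compute(vec.size(), &start, &stop, &step, &slicelength))
+//         throw py::error_already_set();
+//     // start/stop/step are now clamped to vec.size(); slicelength is the number of items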
+
+class capsule : public object {
+public:
+ PYBIND11_OBJECT_DEFAULT(capsule, object, PyCapsule_CheckExact)
+ PYBIND11_DEPRECATED("Use reinterpret_borrow<capsule>() or reinterpret_steal<capsule>()")
+ capsule(PyObject *ptr, bool is_borrowed) : object(is_borrowed ? object(ptr, borrowed_t{}) : object(ptr, stolen_t{})) { }
+
+ explicit capsule(const void *value, const char *name = nullptr, void (*destructor)(PyObject *) = nullptr)
+ : object(PyCapsule_New(const_cast<void *>(value), name, destructor), stolen_t{}) {
+ if (!m_ptr)
+ pybind11_fail("Could not allocate capsule object!");
+ }
+
+ PYBIND11_DEPRECATED("Please pass a destructor that takes a void pointer as input")
+ capsule(const void *value, void (*destruct)(PyObject *))
+ : object(PyCapsule_New(const_cast<void*>(value), nullptr, destruct), stolen_t{}) {
+ if (!m_ptr)
+ pybind11_fail("Could not allocate capsule object!");
+ }
+
+ capsule(const void *value, void (*destructor)(void *)) {
+ m_ptr = PyCapsule_New(const_cast<void *>(value), nullptr, [](PyObject *o) {
+ auto destructor = reinterpret_cast<void (*)(void *)>(PyCapsule_GetContext(o));
+ void *ptr = PyCapsule_GetPointer(o, nullptr);
+ destructor(ptr);
+ });
+
+ if (!m_ptr)
+ pybind11_fail("Could not allocate capsule object!");
+
+ if (PyCapsule_SetContext(m_ptr, (void *) destructor) != 0)
+ pybind11_fail("Could not set capsule context!");
+ }
+
+ capsule(void (*destructor)()) {
+ m_ptr = PyCapsule_New(reinterpret_cast<void *>(destructor), nullptr, [](PyObject *o) {
+ auto destructor = reinterpret_cast<void (*)()>(PyCapsule_GetPointer(o, nullptr));
+ destructor();
+ });
+
+ if (!m_ptr)
+ pybind11_fail("Could not allocate capsule object!");
+ }
+
+ template <typename T> operator T *() const {
+ return get_pointer<T>();
+ }
+
+ /// Get the pointer the capsule holds.
+ template<typename T = void>
+ T* get_pointer() const {
+ auto name = this->name();
+ T *result = static_cast<T *>(PyCapsule_GetPointer(m_ptr, name));
+ if (!result) pybind11_fail("Unable to extract capsule contents!");
+ return result;
+ }
+
+ /// Replaces a capsule's pointer *without* calling the destructor on the existing one.
+ void set_pointer(const void *value) {
+ if (PyCapsule_SetPointer(m_ptr, const_cast<void *>(value)) != 0)
+ pybind11_fail("Could not set capsule pointer");
+ }
+
+ const char *name() const { return PyCapsule_GetName(m_ptr); }
+};
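+
+// Illustrative sketch (not part of the upstream header): a capsule ties a heap allocation's
+// lifetime to a Python object, e.g. to keep data alive while Python still references it.
+//
+//     auto *data = new std::vector<double>(100);
+//     py::capsule owner(data, [](void *p) { delete static_cast<std::vector<double> *>(p); });
+//     // `owner` deletes the vector once its Python reference count drops to zero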
+
+class tuple : public object {
+public:
+ PYBIND11_OBJECT_CVT(tuple, object, PyTuple_Check, PySequence_Tuple)
+ explicit tuple(size_t size = 0) : object(PyTuple_New((ssize_t) size), stolen_t{}) {
+ if (!m_ptr) pybind11_fail("Could not allocate tuple object!");
+ }
+ size_t size() const { return (size_t) PyTuple_Size(m_ptr); }
+ bool empty() const { return size() == 0; }
+ detail::tuple_accessor operator[](size_t index) const { return {*this, index}; }
+ detail::item_accessor operator[](handle h) const { return object::operator[](h); }
+ detail::tuple_iterator begin() const { return {*this, 0}; }
+ detail::tuple_iterator end() const { return {*this, PyTuple_GET_SIZE(m_ptr)}; }
+};
+
+class dict : public object {
+public:
+ PYBIND11_OBJECT_CVT(dict, object, PyDict_Check, raw_dict)
+ dict() : object(PyDict_New(), stolen_t{}) {
+ if (!m_ptr) pybind11_fail("Could not allocate dict object!");
+ }
+ template <typename... Args,
+ typename = detail::enable_if_t<detail::all_of<detail::is_keyword_or_ds<Args>...>::value>,
+ // MSVC workaround: it can't compile an out-of-line definition, so defer the collector
+ typename collector = detail::deferred_t<detail::unpacking_collector<>, Args...>>
+ explicit dict(Args &&...args) : dict(collector(std::forward<Args>(args)...).kwargs()) { }
+
+ size_t size() const { return (size_t) PyDict_Size(m_ptr); }
+ bool empty() const { return size() == 0; }
+ detail::dict_iterator begin() const { return {*this, 0}; }
+ detail::dict_iterator end() const { return {}; }
+ void clear() const { PyDict_Clear(ptr()); }
+ template <typename T> bool contains(T &&key) const {
+ return PyDict_Contains(m_ptr, detail::object_or_cast(std::forward<T>(key)).ptr()) == 1;
+ }
+
+private:
+ /// Call the `dict` Python type -- always returns a new reference
+ static PyObject *raw_dict(PyObject *op) {
+ if (PyDict_Check(op))
+ return handle(op).inc_ref().ptr();
+ return PyObject_CallFunctionObjArgs((PyObject *) &PyDict_Type, op, nullptr);
+ }
+};
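+
+// Illustrative sketch (not part of the upstream header): building and iterating a py::dict.
+// The ``_a`` literal and py::print are declared in other pybind11 headers.
+//
+//     using namespace pybind11::literals;
+//     py::dict d("answer"_a = 42, "pi"_a = 3.14);
+//     d["extra"] = py::str("value");
+//     for (auto item : d)
+//         py::print(item.first, "->", item.second); // item is a std::pair<handle, handle>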
+
+class sequence : public object {
+public:
+ PYBIND11_OBJECT_DEFAULT(sequence, object, PySequence_Check)
+ size_t size() const {
+ ssize_t result = PySequence_Size(m_ptr);
+ if (result == -1)
+ throw error_already_set();
+ return (size_t) result;
+ }
+ bool empty() const { return size() == 0; }
+ detail::sequence_accessor operator[](size_t index) const { return {*this, index}; }
+ detail::item_accessor operator[](handle h) const { return object::operator[](h); }
+ detail::sequence_iterator begin() const { return {*this, 0}; }
+ detail::sequence_iterator end() const { return {*this, PySequence_Size(m_ptr)}; }
+};
+
+class list : public object {
+public:
+ PYBIND11_OBJECT_CVT(list, object, PyList_Check, PySequence_List)
+ explicit list(size_t size = 0) : object(PyList_New((ssize_t) size), stolen_t{}) {
+ if (!m_ptr) pybind11_fail("Could not allocate list object!");
+ }
+ size_t size() const { return (size_t) PyList_Size(m_ptr); }
+ bool empty() const { return size() == 0; }
+ detail::list_accessor operator[](size_t index) const { return {*this, index}; }
+ detail::item_accessor operator[](handle h) const { return object::operator[](h); }
+ detail::list_iterator begin() const { return {*this, 0}; }
+ detail::list_iterator end() const { return {*this, PyList_GET_SIZE(m_ptr)}; }
+ template <typename T> void append(T &&val) const {
+ PyList_Append(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr());
+ }
+ template <typename T> void insert(size_t index, T &&val) const {
+ PyList_Insert(m_ptr, static_cast<ssize_t>(index),
+ detail::object_or_cast(std::forward<T>(val)).ptr());
+ }
+};
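+
+// Illustrative sketch (not part of the upstream header): basic py::list manipulation.
+//
+//     py::list names;
+//     names.append("alice");  // append/insert convert the argument via pybind11::cast
+//     names.insert(0, "bob");
+//     for (py::handle name : names)
+//         py::print(name);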
+
+class args : public tuple { PYBIND11_OBJECT_DEFAULT(args, tuple, PyTuple_Check) };
+class kwargs : public dict { PYBIND11_OBJECT_DEFAULT(kwargs, dict, PyDict_Check) };
+
+class set : public object {
+public:
+ PYBIND11_OBJECT_CVT(set, object, PySet_Check, PySet_New)
+ set() : object(PySet_New(nullptr), stolen_t{}) {
+ if (!m_ptr) pybind11_fail("Could not allocate set object!");
+ }
+ size_t size() const { return (size_t) PySet_Size(m_ptr); }
+ bool empty() const { return size() == 0; }
+ template <typename T> bool add(T &&val) const {
+ return PySet_Add(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr()) == 0;
+ }
+ void clear() const { PySet_Clear(m_ptr); }
+ template <typename T> bool contains(T &&val) const {
+ return PySet_Contains(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr()) == 1;
+ }
+};
+
+class function : public object {
+public:
+ PYBIND11_OBJECT_DEFAULT(function, object, PyCallable_Check)
+ handle cpp_function() const {
+ handle fun = detail::get_function(m_ptr);
+ if (fun && PyCFunction_Check(fun.ptr()))
+ return fun;
+ return handle();
+ }
+ bool is_cpp_function() const { return (bool) cpp_function(); }
+};
+
+class staticmethod : public object {
+public:
+ PYBIND11_OBJECT_CVT(staticmethod, object, detail::PyStaticMethod_Check, PyStaticMethod_New)
+};
+
+class buffer : public object {
+public:
+ PYBIND11_OBJECT_DEFAULT(buffer, object, PyObject_CheckBuffer)
+
+ buffer_info request(bool writable = false) const {
+ int flags = PyBUF_STRIDES | PyBUF_FORMAT;
+ if (writable) flags |= PyBUF_WRITABLE;
+ auto *view = new Py_buffer();
+ if (PyObject_GetBuffer(m_ptr, view, flags) != 0) {
+ delete view;
+ throw error_already_set();
+ }
+ return buffer_info(view);
+ }
+};
+
+class memoryview : public object {
+public:
+ PYBIND11_OBJECT_CVT(memoryview, object, PyMemoryView_Check, PyMemoryView_FromObject)
+
+ /** \rst
+ Creates ``memoryview`` from ``buffer_info``.
+
+ ``buffer_info`` must be created from ``buffer::request()``. Otherwise
+ throws an exception.
+
+ For creating a ``memoryview`` from objects that support buffer protocol,
+ use ``memoryview(const object& obj)`` instead of this constructor.
+ \endrst */
+ explicit memoryview(const buffer_info& info) {
+ if (!info.view())
+ pybind11_fail("Prohibited to create memoryview without Py_buffer");
+ // Note: PyMemoryView_FromBuffer never increments obj reference.
+ m_ptr = (info.view()->obj) ?
+ PyMemoryView_FromObject(info.view()->obj) :
+ PyMemoryView_FromBuffer(info.view());
+ if (!m_ptr)
+ pybind11_fail("Unable to create memoryview from buffer descriptor");
+ }
+
+ /** \rst
+ Creates ``memoryview`` from static buffer.
+
+ This method is meant for providing a ``memoryview`` for a C/C++ buffer not
+ managed by Python. The caller is responsible for managing the lifetime
+ of ``ptr`` and ``format``, which MUST outlive the memoryview constructed
+ here.
+
+ See also: Python C API documentation for `PyMemoryView_FromBuffer`_.
+
+ .. _PyMemoryView_FromBuffer: https://docs.python.org/c-api/memoryview.html#c.PyMemoryView_FromBuffer
+
+ :param ptr: Pointer to the buffer.
+ :param itemsize: Byte size of an element.
+ :param format: Pointer to the null-terminated format string. For
+ homogeneous buffers, this should be set to
+ ``format_descriptor<T>::value``.
+ :param shape: Shape of the tensor (1 entry per dimension).
+ :param strides: Number of bytes between adjacent entries (one entry
+ per dimension).
+ :param readonly: Flag to indicate if the underlying storage may be
+ written to.
+ \endrst */
+ static memoryview from_buffer(
+ void *ptr, ssize_t itemsize, const char *format,
+ detail::any_container<ssize_t> shape,
+ detail::any_container<ssize_t> strides, bool readonly = false);
+
+ static memoryview from_buffer(
+ const void *ptr, ssize_t itemsize, const char *format,
+ detail::any_container<ssize_t> shape,
+ detail::any_container<ssize_t> strides) {
+ return memoryview::from_buffer(
+ const_cast<void*>(ptr), itemsize, format, shape, strides, true);
+ }
+
+ template<typename T>
+ static memoryview from_buffer(
+ T *ptr, detail::any_container<ssize_t> shape,
+ detail::any_container<ssize_t> strides, bool readonly = false) {
+ return memoryview::from_buffer(
+ reinterpret_cast<void*>(ptr), sizeof(T),
+ format_descriptor<T>::value, shape, strides, readonly);
+ }
+
+ template<typename T>
+ static memoryview from_buffer(
+ const T *ptr, detail::any_container<ssize_t> shape,
+ detail::any_container<ssize_t> strides) {
+ return memoryview::from_buffer(
+ const_cast<T*>(ptr), shape, strides, true);
+ }
+
+#if PY_MAJOR_VERSION >= 3
+ /** \rst
+ Creates ``memoryview`` from static memory.
+
+ This method is meant for providing a ``memoryview`` for a C/C++ buffer not
+ managed by Python. The caller is responsible for managing the lifetime
+ of ``mem``, which MUST outlive the memoryview constructed here.
+
+ This method is not available in Python 2.
+
+ See also: Python C API documentation for `PyMemoryView_FromMemory`_.
+
+ .. _PyMemoryView_FromMemory: https://docs.python.org/c-api/memoryview.html#c.PyMemoryView_FromMemory
+ \endrst */
+ static memoryview from_memory(void *mem, ssize_t size, bool readonly = false) {
+ PyObject* ptr = PyMemoryView_FromMemory(
+ reinterpret_cast<char*>(mem), size,
+ (readonly) ? PyBUF_READ : PyBUF_WRITE);
+ if (!ptr)
+ pybind11_fail("Could not allocate memoryview object!");
+ return memoryview(object(ptr, stolen_t{}));
+ }
+
+ static memoryview from_memory(const void *mem, ssize_t size) {
+ return memoryview::from_memory(const_cast<void*>(mem), size, true);
+ }
+#endif
+};
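+// Usage sketch (illustrative, not part of the upstream header): exposing a
+// static C/C++ array to Python without copying; the module handle `m` and
+// the function name are assumptions.
+//
+//     static double data[4] = { 1.0, 2.0, 3.0, 4.0 };
+//     m.def("data_view", []() {
+//         return memoryview::from_buffer(
+//             data, { 4 }, { sizeof(double) });  // shape, strides (in bytes)
+//     });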
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+inline memoryview memoryview::from_buffer(
+ void *ptr, ssize_t itemsize, const char* format,
+ detail::any_container<ssize_t> shape,
+ detail::any_container<ssize_t> strides, bool readonly) {
+ size_t ndim = shape->size();
+ if (ndim != strides->size())
+ pybind11_fail("memoryview: shape length doesn't match strides length");
+ ssize_t size = ndim ? 1 : 0;
+ for (size_t i = 0; i < ndim; ++i)
+ size *= (*shape)[i];
+ Py_buffer view;
+ view.buf = ptr;
+ view.obj = nullptr;
+ view.len = size * itemsize;
+ view.readonly = static_cast<int>(readonly);
+ view.itemsize = itemsize;
+ view.format = const_cast<char*>(format);
+ view.ndim = static_cast<int>(ndim);
+ view.shape = shape->data();
+ view.strides = strides->data();
+ view.suboffsets = nullptr;
+ view.internal = nullptr;
+ PyObject* obj = PyMemoryView_FromBuffer(&view);
+ if (!obj)
+ throw error_already_set();
+ return memoryview(object(obj, stolen_t{}));
+}
+#endif // DOXYGEN_SHOULD_SKIP_THIS
+/// @} pytypes
+
+/// \addtogroup python_builtins
+/// @{
+
+/// Get the length of a Python object.
+inline size_t len(handle h) {
+ ssize_t result = PyObject_Length(h.ptr());
+ if (result < 0)
+ throw error_already_set();
+ return (size_t) result;
+}
+
+/// Get the length hint of a Python object.
+/// Returns 0 when this cannot be determined.
+inline size_t len_hint(handle h) {
+#if PY_VERSION_HEX >= 0x03040000
+ ssize_t result = PyObject_LengthHint(h.ptr(), 0);
+#else
+ ssize_t result = PyObject_Length(h.ptr());
+#endif
+ if (result < 0) {
+ // Sometimes a length can't be determined at all (e.g. generators),
+ // in which case simply return 0
+ PyErr_Clear();
+ return 0;
+ }
+ return (size_t) result;
+}
+
+inline str repr(handle h) {
+ PyObject *str_value = PyObject_Repr(h.ptr());
+ if (!str_value) throw error_already_set();
+#if PY_MAJOR_VERSION < 3
+ PyObject *unicode = PyUnicode_FromEncodedObject(str_value, "utf-8", nullptr);
+ Py_XDECREF(str_value); str_value = unicode;
+ if (!str_value) throw error_already_set();
+#endif
+ return reinterpret_steal<str>(str_value);
+}
+
+inline iterator iter(handle obj) {
+ PyObject *result = PyObject_GetIter(obj.ptr());
+ if (!result) { throw error_already_set(); }
+ return reinterpret_steal<iterator>(result);
+}
+/// @} python_builtins
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+template <typename D> iterator object_api<D>::begin() const { return iter(derived()); }
+template <typename D> iterator object_api<D>::end() const { return iterator::sentinel(); }
+template <typename D> item_accessor object_api<D>::operator[](handle key) const {
+ return {derived(), reinterpret_borrow<object>(key)};
+}
+template <typename D> item_accessor object_api<D>::operator[](const char *key) const {
+ return {derived(), pybind11::str(key)};
+}
+template <typename D> obj_attr_accessor object_api<D>::attr(handle key) const {
+ return {derived(), reinterpret_borrow<object>(key)};
+}
+template <typename D> str_attr_accessor object_api<D>::attr(const char *key) const {
+ return {derived(), key};
+}
+template <typename D> args_proxy object_api<D>::operator*() const {
+ return args_proxy(derived().ptr());
+}
+template <typename D> template <typename T> bool object_api<D>::contains(T &&item) const {
+ return attr("__contains__")(std::forward<T>(item)).template cast<bool>();
+}
+
+template <typename D>
+pybind11::str object_api<D>::str() const { return pybind11::str(derived()); }
+
+template <typename D>
+str_attr_accessor object_api<D>::doc() const { return attr("__doc__"); }
+
+template <typename D>
+handle object_api<D>::get_type() const { return type::handle_of(derived()); }
+
+template <typename D>
+bool object_api<D>::rich_compare(object_api const &other, int value) const {
+ int rv = PyObject_RichCompareBool(derived().ptr(), other.derived().ptr(), value);
+ if (rv == -1)
+ throw error_already_set();
+ return rv == 1;
+}
+
+#define PYBIND11_MATH_OPERATOR_UNARY(op, fn) \
+ template <typename D> object object_api<D>::op() const { \
+ object result = reinterpret_steal<object>(fn(derived().ptr())); \
+ if (!result.ptr()) \
+ throw error_already_set(); \
+ return result; \
+ }
+
+#define PYBIND11_MATH_OPERATOR_BINARY(op, fn) \
+ template <typename D> \
+ object object_api<D>::op(object_api const &other) const { \
+ object result = reinterpret_steal<object>( \
+ fn(derived().ptr(), other.derived().ptr())); \
+ if (!result.ptr()) \
+ throw error_already_set(); \
+ return result; \
+ }
+
+PYBIND11_MATH_OPERATOR_UNARY (operator~, PyNumber_Invert)
+PYBIND11_MATH_OPERATOR_UNARY (operator-, PyNumber_Negative)
+PYBIND11_MATH_OPERATOR_BINARY(operator+, PyNumber_Add)
+PYBIND11_MATH_OPERATOR_BINARY(operator+=, PyNumber_InPlaceAdd)
+PYBIND11_MATH_OPERATOR_BINARY(operator-, PyNumber_Subtract)
+PYBIND11_MATH_OPERATOR_BINARY(operator-=, PyNumber_InPlaceSubtract)
+PYBIND11_MATH_OPERATOR_BINARY(operator*, PyNumber_Multiply)
+PYBIND11_MATH_OPERATOR_BINARY(operator*=, PyNumber_InPlaceMultiply)
+PYBIND11_MATH_OPERATOR_BINARY(operator/, PyNumber_TrueDivide)
+PYBIND11_MATH_OPERATOR_BINARY(operator/=, PyNumber_InPlaceTrueDivide)
+PYBIND11_MATH_OPERATOR_BINARY(operator|, PyNumber_Or)
+PYBIND11_MATH_OPERATOR_BINARY(operator|=, PyNumber_InPlaceOr)
+PYBIND11_MATH_OPERATOR_BINARY(operator&, PyNumber_And)
+PYBIND11_MATH_OPERATOR_BINARY(operator&=, PyNumber_InPlaceAnd)
+PYBIND11_MATH_OPERATOR_BINARY(operator^, PyNumber_Xor)
+PYBIND11_MATH_OPERATOR_BINARY(operator^=, PyNumber_InPlaceXor)
+PYBIND11_MATH_OPERATOR_BINARY(operator<<, PyNumber_Lshift)
+PYBIND11_MATH_OPERATOR_BINARY(operator<<=, PyNumber_InPlaceLshift)
+PYBIND11_MATH_OPERATOR_BINARY(operator>>, PyNumber_Rshift)
+PYBIND11_MATH_OPERATOR_BINARY(operator>>=, PyNumber_InPlaceRshift)
+
+#undef PYBIND11_MATH_OPERATOR_UNARY
+#undef PYBIND11_MATH_OPERATOR_BINARY
+
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+/*
+ pybind11/stl.h: Transparent conversion for STL data types
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "pybind11.h"
+#include <set>
+#include <unordered_set>
+#include <map>
+#include <unordered_map>
+#include <iostream>
+#include <list>
+#include <deque>
+#include <valarray>
+
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable: 4127) // warning C4127: Conditional expression is constant
+#endif
+
+#ifdef __has_include
+// std::optional (but including it in c++14 mode isn't allowed)
+# if defined(PYBIND11_CPP17) && __has_include(<optional>)
+# include <optional>
+# define PYBIND11_HAS_OPTIONAL 1
+# endif
+// std::experimental::optional (but not allowed in c++11 mode)
+# if defined(PYBIND11_CPP14) && (__has_include(<experimental/optional>) && \
+ !__has_include(<optional>))
+# include <experimental/optional>
+# define PYBIND11_HAS_EXP_OPTIONAL 1
+# endif
+// std::variant
+# if defined(PYBIND11_CPP17) && __has_include(<variant>)
+# include <variant>
+# define PYBIND11_HAS_VARIANT 1
+# endif
+#elif defined(_MSC_VER) && defined(PYBIND11_CPP17)
+# include <optional>
+# include <variant>
+# define PYBIND11_HAS_OPTIONAL 1
+# define PYBIND11_HAS_VARIANT 1
+#endif
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+/// Extracts a const lvalue reference or rvalue reference for U based on the type of T (e.g. for
+/// forwarding a container element). Typically used indirectly via forward_like(), below.
+template <typename T, typename U>
+using forwarded_type = conditional_t<
+ std::is_lvalue_reference<T>::value, remove_reference_t<U> &, remove_reference_t<U> &&>;
+
+/// Forwards a value U as rvalue or lvalue according to whether T is rvalue or lvalue; typically
+/// used for forwarding a container's elements.
+template <typename T, typename U>
+forwarded_type<T, U> forward_like(U &&u) {
+ return std::forward<detail::forwarded_type<T, U>>(std::forward<U>(u));
+}
+
+template <typename Type, typename Key> struct set_caster {
+ using type = Type;
+ using key_conv = make_caster<Key>;
+
+ bool load(handle src, bool convert) {
+ if (!isinstance<pybind11::set>(src))
+ return false;
+ auto s = reinterpret_borrow<pybind11::set>(src);
+ value.clear();
+ for (auto entry : s) {
+ key_conv conv;
+ if (!conv.load(entry, convert))
+ return false;
+ value.insert(cast_op<Key &&>(std::move(conv)));
+ }
+ return true;
+ }
+
+ template <typename T>
+ static handle cast(T &&src, return_value_policy policy, handle parent) {
+ if (!std::is_lvalue_reference<T>::value)
+ policy = return_value_policy_override<Key>::policy(policy);
+ pybind11::set s;
+ for (auto &&value : src) {
+ auto value_ = reinterpret_steal<object>(key_conv::cast(forward_like<T>(value), policy, parent));
+ if (!value_ || !s.add(value_))
+ return handle();
+ }
+ return s.release();
+ }
+
+ PYBIND11_TYPE_CASTER(type, _("Set[") + key_conv::name + _("]"));
+};
+
+template <typename Type, typename Key, typename Value> struct map_caster {
+ using key_conv = make_caster<Key>;
+ using value_conv = make_caster<Value>;
+
+ bool load(handle src, bool convert) {
+ if (!isinstance<dict>(src))
+ return false;
+ auto d = reinterpret_borrow<dict>(src);
+ value.clear();
+ for (auto it : d) {
+ key_conv kconv;
+ value_conv vconv;
+ if (!kconv.load(it.first.ptr(), convert) ||
+ !vconv.load(it.second.ptr(), convert))
+ return false;
+ value.emplace(cast_op<Key &&>(std::move(kconv)), cast_op<Value &&>(std::move(vconv)));
+ }
+ return true;
+ }
+
+ template <typename T>
+ static handle cast(T &&src, return_value_policy policy, handle parent) {
+ dict d;
+ return_value_policy policy_key = policy;
+ return_value_policy policy_value = policy;
+ if (!std::is_lvalue_reference<T>::value) {
+ policy_key = return_value_policy_override<Key>::policy(policy_key);
+ policy_value = return_value_policy_override<Value>::policy(policy_value);
+ }
+ for (auto &&kv : src) {
+ auto key = reinterpret_steal<object>(key_conv::cast(forward_like<T>(kv.first), policy_key, parent));
+ auto value = reinterpret_steal<object>(value_conv::cast(forward_like<T>(kv.second), policy_value, parent));
+ if (!key || !value)
+ return handle();
+ d[key] = value;
+ }
+ return d.release();
+ }
+
+ PYBIND11_TYPE_CASTER(Type, _("Dict[") + key_conv::name + _(", ") + value_conv::name + _("]"));
+};
+
+template <typename Type, typename Value> struct list_caster {
+ using value_conv = make_caster<Value>;
+
+ bool load(handle src, bool convert) {
+ if (!isinstance<sequence>(src) || isinstance<str>(src))
+ return false;
+ auto s = reinterpret_borrow<sequence>(src);
+ value.clear();
+ reserve_maybe(s, &value);
+ for (auto it : s) {
+ value_conv conv;
+ if (!conv.load(it, convert))
+ return false;
+ value.push_back(cast_op<Value &&>(std::move(conv)));
+ }
+ return true;
+ }
+
+private:
+ template <typename T = Type,
+ enable_if_t<std::is_same<decltype(std::declval<T>().reserve(0)), void>::value, int> = 0>
+ void reserve_maybe(sequence s, Type *) { value.reserve(s.size()); }
+ void reserve_maybe(sequence, void *) { }
+
+public:
+ template <typename T>
+ static handle cast(T &&src, return_value_policy policy, handle parent) {
+ if (!std::is_lvalue_reference<T>::value)
+ policy = return_value_policy_override<Value>::policy(policy);
+ list l(src.size());
+ size_t index = 0;
+ for (auto &&value : src) {
+ auto value_ = reinterpret_steal<object>(value_conv::cast(forward_like<T>(value), policy, parent));
+ if (!value_)
+ return handle();
+ PyList_SET_ITEM(l.ptr(), (ssize_t) index++, value_.release().ptr()); // steals a reference
+ }
+ return l.release();
+ }
+
+ PYBIND11_TYPE_CASTER(Type, _("List[") + value_conv::name + _("]"));
+};
+
+template <typename Type, typename Alloc> struct type_caster<std::vector<Type, Alloc>>
+ : list_caster<std::vector<Type, Alloc>, Type> { };
+
+template <typename Type, typename Alloc> struct type_caster<std::deque<Type, Alloc>>
+ : list_caster<std::deque<Type, Alloc>, Type> { };
+
+template <typename Type, typename Alloc> struct type_caster<std::list<Type, Alloc>>
+ : list_caster<std::list<Type, Alloc>, Type> { };
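+// Usage sketch (illustrative): with this header included, the casters above
+// convert STL sequence containers in both directions, so a bound function
+// taking std::vector<double> accepts a Python list; `m` is an assumed module.
+//
+//     m.def("sum", [](const std::vector<double> &v) {
+//         double s = 0;
+//         for (double x : v) s += x;
+//         return s;
+//     });  // callable from Python as sum([1.0, 2.0, 3.0])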
+
+template <typename ArrayType, typename Value, bool Resizable, size_t Size = 0> struct array_caster {
+ using value_conv = make_caster<Value>;
+
+private:
+ template <bool R = Resizable>
+ bool require_size(enable_if_t<R, size_t> size) {
+ if (value.size() != size)
+ value.resize(size);
+ return true;
+ }
+ template <bool R = Resizable>
+ bool require_size(enable_if_t<!R, size_t> size) {
+ return size == Size;
+ }
+
+public:
+ bool load(handle src, bool convert) {
+ if (!isinstance<sequence>(src))
+ return false;
+ auto l = reinterpret_borrow<sequence>(src);
+ if (!require_size(l.size()))
+ return false;
+ size_t ctr = 0;
+ for (auto it : l) {
+ value_conv conv;
+ if (!conv.load(it, convert))
+ return false;
+ value[ctr++] = cast_op<Value &&>(std::move(conv));
+ }
+ return true;
+ }
+
+ template <typename T>
+ static handle cast(T &&src, return_value_policy policy, handle parent) {
+ list l(src.size());
+ size_t index = 0;
+ for (auto &&value : src) {
+ auto value_ = reinterpret_steal<object>(value_conv::cast(forward_like<T>(value), policy, parent));
+ if (!value_)
+ return handle();
+ PyList_SET_ITEM(l.ptr(), (ssize_t) index++, value_.release().ptr()); // steals a reference
+ }
+ return l.release();
+ }
+
+ PYBIND11_TYPE_CASTER(ArrayType, _("List[") + value_conv::name + _<Resizable>(_(""), _("[") + _<Size>() + _("]")) + _("]"));
+};
+
+template <typename Type, size_t Size> struct type_caster<std::array<Type, Size>>
+ : array_caster<std::array<Type, Size>, Type, false, Size> { };
+
+template <typename Type> struct type_caster<std::valarray<Type>>
+ : array_caster<std::valarray<Type>, Type, true> { };
+
+template <typename Key, typename Compare, typename Alloc> struct type_caster<std::set<Key, Compare, Alloc>>
+ : set_caster<std::set<Key, Compare, Alloc>, Key> { };
+
+template <typename Key, typename Hash, typename Equal, typename Alloc> struct type_caster<std::unordered_set<Key, Hash, Equal, Alloc>>
+ : set_caster<std::unordered_set<Key, Hash, Equal, Alloc>, Key> { };
+
+template <typename Key, typename Value, typename Compare, typename Alloc> struct type_caster<std::map<Key, Value, Compare, Alloc>>
+ : map_caster<std::map<Key, Value, Compare, Alloc>, Key, Value> { };
+
+template <typename Key, typename Value, typename Hash, typename Equal, typename Alloc> struct type_caster<std::unordered_map<Key, Value, Hash, Equal, Alloc>>
+ : map_caster<std::unordered_map<Key, Value, Hash, Equal, Alloc>, Key, Value> { };
+
+// This type caster is intended to be used for std::optional and std::experimental::optional
+template<typename T> struct optional_caster {
+ using value_conv = make_caster<typename T::value_type>;
+
+ template <typename T_>
+ static handle cast(T_ &&src, return_value_policy policy, handle parent) {
+ if (!src)
+ return none().inc_ref();
+ if (!std::is_lvalue_reference<T>::value) {
+ policy = return_value_policy_override<T>::policy(policy);
+ }
+ return value_conv::cast(*std::forward<T_>(src), policy, parent);
+ }
+
+ bool load(handle src, bool convert) {
+ if (!src) {
+ return false;
+ } else if (src.is_none()) {
+ return true; // default-constructed value is already empty
+ }
+ value_conv inner_caster;
+ if (!inner_caster.load(src, convert))
+ return false;
+
+ value.emplace(cast_op<typename T::value_type &&>(std::move(inner_caster)));
+ return true;
+ }
+
+ PYBIND11_TYPE_CASTER(T, _("Optional[") + value_conv::name + _("]"));
+};
+
+#if defined(PYBIND11_HAS_OPTIONAL)
+template<typename T> struct type_caster<std::optional<T>>
+ : public optional_caster<std::optional<T>> {};
+
+template<> struct type_caster<std::nullopt_t>
+ : public void_caster<std::nullopt_t> {};
+#endif
+
+#if defined(PYBIND11_HAS_EXP_OPTIONAL)
+template<typename T> struct type_caster<std::experimental::optional<T>>
+ : public optional_caster<std::experimental::optional<T>> {};
+
+template<> struct type_caster<std::experimental::nullopt_t>
+ : public void_caster<std::experimental::nullopt_t> {};
+#endif
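+// Usage sketch (illustrative): an empty optional is returned to Python as
+// `None`, a filled one as its contained value; requires <cmath> for std::sqrt.
+//
+//     m.def("safe_sqrt", [](double x) -> std::optional<double> {
+//         if (x < 0) return std::nullopt;
+//         return std::sqrt(x);
+//     });  // safe_sqrt(-1.0) returns None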
+
+/// Visit a variant and cast any found type to Python
+struct variant_caster_visitor {
+ return_value_policy policy;
+ handle parent;
+
+ using result_type = handle; // required by boost::variant in C++11
+
+ template <typename T>
+ result_type operator()(T &&src) const {
+ return make_caster<T>::cast(std::forward<T>(src), policy, parent);
+ }
+};
+
+/// Helper class which abstracts away variant's `visit` function. `std::variant` and similar
+/// `namespace::variant` types which provide a `namespace::visit()` function are handled here
+/// automatically using argument-dependent lookup. Users can provide specializations for other
+/// variant-like classes, e.g. `boost::variant` and `boost::apply_visitor`.
+template <template<typename...> class Variant>
+struct visit_helper {
+ template <typename... Args>
+ static auto call(Args &&...args) -> decltype(visit(std::forward<Args>(args)...)) {
+ return visit(std::forward<Args>(args)...);
+ }
+};
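+// Sketch (illustrative): following the note above, a specialization for
+// boost::variant would forward to boost::apply_visitor, e.g.
+//
+//     template <>
+//     struct visit_helper<boost::variant> {
+//         template <typename... Args>
+//         static auto call(Args &&...args)
+//             -> decltype(boost::apply_visitor(args...)) {
+//             return boost::apply_visitor(args...);
+//         }
+//     };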
+
+/// Generic variant caster
+template <typename Variant> struct variant_caster;
+
+template <template<typename...> class V, typename... Ts>
+struct variant_caster<V<Ts...>> {
+ static_assert(sizeof...(Ts) > 0, "Variant must consist of at least one alternative.");
+
+ template <typename U, typename... Us>
+ bool load_alternative(handle src, bool convert, type_list<U, Us...>) {
+ auto caster = make_caster<U>();
+ if (caster.load(src, convert)) {
+ value = cast_op<U>(caster);
+ return true;
+ }
+ return load_alternative(src, convert, type_list<Us...>{});
+ }
+
+ bool load_alternative(handle, bool, type_list<>) { return false; }
+
+ bool load(handle src, bool convert) {
+ // Do a first pass without conversions to improve constructor resolution.
+ // E.g. `py::int_(1).cast<variant<double, int>>()` needs to fill the `int`
+ // slot of the variant. Without two-pass loading `double` would be filled
+ // because it appears first and a conversion is possible.
+ if (convert && load_alternative(src, false, type_list<Ts...>{}))
+ return true;
+ return load_alternative(src, convert, type_list<Ts...>{});
+ }
+
+ template <typename Variant>
+ static handle cast(Variant &&src, return_value_policy policy, handle parent) {
+ return visit_helper<V>::call(variant_caster_visitor{policy, parent},
+ std::forward<Variant>(src));
+ }
+
+ using Type = V<Ts...>;
+ PYBIND11_TYPE_CASTER(Type, _("Union[") + detail::concat(make_caster<Ts>::name...) + _("]"));
+};
+
+#if defined(PYBIND11_HAS_VARIANT)
+template <typename... Ts>
+struct type_caster<std::variant<Ts...>> : variant_caster<std::variant<Ts...>> { };
+#endif
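+// Usage sketch (illustrative): the alternatives of a variant argument are
+// tried in declaration order, first without and then with implicit
+// conversions (see the two-pass load() above); `m` is an assumed module.
+//
+//     m.def("which", [](const std::variant<int, std::string> &v) {
+//         return v.index();  // 0 if an int was passed, 1 for a str
+//     });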
+
+PYBIND11_NAMESPACE_END(detail)
+
+inline std::ostream &operator<<(std::ostream &os, const handle &obj) {
+ os << (std::string) str(obj);
+ return os;
+}
+
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
--- /dev/null
+/*
+ pybind11/std_bind.h: Binding generators for STL data types
+
+ Copyright (c) 2016 Sergey Lyskov and Wenzel Jakob
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "detail/common.h"
+#include "operators.h"
+
+#include <algorithm>
+#include <sstream>
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+/* SFINAE helper class used by 'is_comparable' */
+template <typename T> struct container_traits {
+ template <typename T2> static std::true_type test_comparable(decltype(std::declval<const T2 &>() == std::declval<const T2 &>())*);
+ template <typename T2> static std::false_type test_comparable(...);
+ template <typename T2> static std::true_type test_value(typename T2::value_type *);
+ template <typename T2> static std::false_type test_value(...);
+ template <typename T2> static std::true_type test_pair(typename T2::first_type *, typename T2::second_type *);
+ template <typename T2> static std::false_type test_pair(...);
+
+ static constexpr const bool is_comparable = std::is_same<std::true_type, decltype(test_comparable<T>(nullptr))>::value;
+ static constexpr const bool is_pair = std::is_same<std::true_type, decltype(test_pair<T>(nullptr, nullptr))>::value;
+ static constexpr const bool is_vector = std::is_same<std::true_type, decltype(test_value<T>(nullptr))>::value;
+ static constexpr const bool is_element = !is_pair && !is_vector;
+};
+
+/* Default: is_comparable -> std::false_type */
+template <typename T, typename SFINAE = void>
+struct is_comparable : std::false_type { };
+
+/* For non-map data structures, check whether operator== can be instantiated */
+template <typename T>
+struct is_comparable<
+ T, enable_if_t<container_traits<T>::is_element &&
+ container_traits<T>::is_comparable>>
+ : std::true_type { };
+
+/* For a vector/map data structure, recursively check the value type (which is std::pair for maps) */
+template <typename T>
+struct is_comparable<T, enable_if_t<container_traits<T>::is_vector>> {
+ static constexpr const bool value =
+ is_comparable<typename T::value_type>::value;
+};
+
+/* For pairs, recursively check the two data types */
+template <typename T>
+struct is_comparable<T, enable_if_t<container_traits<T>::is_pair>> {
+ static constexpr const bool value =
+ is_comparable<typename T::first_type>::value &&
+ is_comparable<typename T::second_type>::value;
+};
+
+/* Fallback functions */
+template <typename, typename, typename... Args> void vector_if_copy_constructible(const Args &...) { }
+template <typename, typename, typename... Args> void vector_if_equal_operator(const Args &...) { }
+template <typename, typename, typename... Args> void vector_if_insertion_operator(const Args &...) { }
+template <typename, typename, typename... Args> void vector_modifiers(const Args &...) { }
+
+template<typename Vector, typename Class_>
+void vector_if_copy_constructible(enable_if_t<is_copy_constructible<Vector>::value, Class_> &cl) {
+ cl.def(init<const Vector &>(), "Copy constructor");
+}
+
+template<typename Vector, typename Class_>
+void vector_if_equal_operator(enable_if_t<is_comparable<Vector>::value, Class_> &cl) {
+ using T = typename Vector::value_type;
+
+ cl.def(self == self);
+ cl.def(self != self);
+
+ cl.def("count",
+ [](const Vector &v, const T &x) {
+ return std::count(v.begin(), v.end(), x);
+ },
+ arg("x"),
+ "Return the number of times ``x`` appears in the list"
+ );
+
+ cl.def("remove", [](Vector &v, const T &x) {
+ auto p = std::find(v.begin(), v.end(), x);
+ if (p != v.end())
+ v.erase(p);
+ else
+ throw value_error();
+ },
+ arg("x"),
+ "Remove the first item from the list whose value is x. "
+ "It is an error if there is no such item."
+ );
+
+ cl.def("__contains__",
+ [](const Vector &v, const T &x) {
+ return std::find(v.begin(), v.end(), x) != v.end();
+ },
+ arg("x"),
+ "Return true the container contains ``x``"
+ );
+}
+
+// Vector modifiers -- requires a copyable vector_type:
+// (Technically, some of these (pop and __delitem__) don't actually require copyability, but it seems
+// silly to allow deletion but not insertion, so include them here too.)
+template <typename Vector, typename Class_>
+void vector_modifiers(enable_if_t<is_copy_constructible<typename Vector::value_type>::value, Class_> &cl) {
+ using T = typename Vector::value_type;
+ using SizeType = typename Vector::size_type;
+ using DiffType = typename Vector::difference_type;
+
+ auto wrap_i = [](DiffType i, SizeType n) {
+ if (i < 0)
+ i += n;
+ if (i < 0 || (SizeType)i >= n)
+ throw index_error();
+ return i;
+ };
+
+ cl.def("append",
+ [](Vector &v, const T &value) { v.push_back(value); },
+ arg("x"),
+ "Add an item to the end of the list");
+
+ cl.def(init([](iterable it) {
+ auto v = std::unique_ptr<Vector>(new Vector());
+ v->reserve(len_hint(it));
+ for (handle h : it)
+ v->push_back(h.cast<T>());
+ return v.release();
+ }));
+
+ cl.def("clear",
+ [](Vector &v) {
+ v.clear();
+ },
+ "Clear the contents"
+ );
+
+ cl.def("extend",
+ [](Vector &v, const Vector &src) {
+ v.insert(v.end(), src.begin(), src.end());
+ },
+ arg("L"),
+ "Extend the list by appending all the items in the given list"
+ );
+
+ cl.def("extend",
+ [](Vector &v, iterable it) {
+ const size_t old_size = v.size();
+ v.reserve(old_size + len_hint(it));
+ try {
+ for (handle h : it) {
+ v.push_back(h.cast<T>());
+ }
+ } catch (const cast_error &) {
+ v.erase(v.begin() + static_cast<typename Vector::difference_type>(old_size), v.end());
+ try {
+ v.shrink_to_fit();
+ } catch (const std::exception &) {
+ // Do nothing
+ }
+ throw;
+ }
+ },
+ arg("L"),
+ "Extend the list by appending all the items in the given list"
+ );
+
+ cl.def("insert",
+ [](Vector &v, DiffType i, const T &x) {
+ // Can't use wrap_i; i == v.size() is OK
+ if (i < 0)
+ i += v.size();
+ if (i < 0 || (SizeType)i > v.size())
+ throw index_error();
+ v.insert(v.begin() + i, x);
+ },
+ arg("i") , arg("x"),
+ "Insert an item at a given position."
+ );
+
+ cl.def("pop",
+ [](Vector &v) {
+ if (v.empty())
+ throw index_error();
+ T t = v.back();
+ v.pop_back();
+ return t;
+ },
+ "Remove and return the last item"
+ );
+
+ cl.def("pop",
+ [wrap_i](Vector &v, DiffType i) {
+ i = wrap_i(i, v.size());
+ T t = v[(SizeType) i];
+ v.erase(v.begin() + i);
+ return t;
+ },
+ arg("i"),
+ "Remove and return the item at index ``i``"
+ );
+
+ cl.def("__setitem__",
+ [wrap_i](Vector &v, DiffType i, const T &t) {
+ i = wrap_i(i, v.size());
+ v[(SizeType)i] = t;
+ }
+ );
+
+ /// Slicing protocol
+ cl.def("__getitem__",
+ [](const Vector &v, slice slice) -> Vector * {
+ size_t start, stop, step, slicelength;
+
+ if (!slice.compute(v.size(), &start, &stop, &step, &slicelength))
+ throw error_already_set();
+
+ auto *seq = new Vector();
+ seq->reserve((size_t) slicelength);
+
+ for (size_t i=0; i<slicelength; ++i) {
+ seq->push_back(v[start]);
+ start += step;
+ }
+ return seq;
+ },
+ arg("s"),
+ "Retrieve list elements using a slice object"
+ );
+
+ cl.def("__setitem__",
+ [](Vector &v, slice slice, const Vector &value) {
+ size_t start, stop, step, slicelength;
+ if (!slice.compute(v.size(), &start, &stop, &step, &slicelength))
+ throw error_already_set();
+
+ if (slicelength != value.size())
+ throw std::runtime_error("Left and right hand size of slice assignment have different sizes!");
+
+ for (size_t i=0; i<slicelength; ++i) {
+ v[start] = value[i];
+ start += step;
+ }
+ },
+ "Assign list elements using a slice object"
+ );
+
+ cl.def("__delitem__",
+ [wrap_i](Vector &v, DiffType i) {
+ i = wrap_i(i, v.size());
+ v.erase(v.begin() + i);
+ },
+ "Delete the list elements at index ``i``"
+ );
+
+ cl.def("__delitem__",
+ [](Vector &v, slice slice) {
+ size_t start, stop, step, slicelength;
+
+ if (!slice.compute(v.size(), &start, &stop, &step, &slicelength))
+ throw error_already_set();
+
+ if (step == 1 && false) { // note: '&& false' disables this contiguous-erase fast path
+ v.erase(v.begin() + (DiffType) start, v.begin() + DiffType(start + slicelength));
+ } else {
+ for (size_t i = 0; i < slicelength; ++i) {
+ v.erase(v.begin() + DiffType(start));
+ start += step - 1;
+ }
+ }
+ },
+ "Delete list elements using a slice object"
+ );
+
+}
+
+// If the type has an operator[] that doesn't return a reference (most notably std::vector<bool>),
+// we have to access by copying; otherwise we return by reference.
+template <typename Vector> using vector_needs_copy = negation<
+ std::is_same<decltype(std::declval<Vector>()[typename Vector::size_type()]), typename Vector::value_type &>>;
+
+// The usual case: access and iterate by reference
+template <typename Vector, typename Class_>
+void vector_accessor(enable_if_t<!vector_needs_copy<Vector>::value, Class_> &cl) {
+ using T = typename Vector::value_type;
+ using SizeType = typename Vector::size_type;
+ using DiffType = typename Vector::difference_type;
+ using ItType = typename Vector::iterator;
+
+ auto wrap_i = [](DiffType i, SizeType n) {
+ if (i < 0)
+ i += n;
+ if (i < 0 || (SizeType)i >= n)
+ throw index_error();
+ return i;
+ };
+
+ cl.def("__getitem__",
+ [wrap_i](Vector &v, DiffType i) -> T & {
+ i = wrap_i(i, v.size());
+ return v[(SizeType)i];
+ },
+ return_value_policy::reference_internal // ref + keepalive
+ );
+
+ cl.def("__iter__",
+ [](Vector &v) {
+ return make_iterator<
+ return_value_policy::reference_internal, ItType, ItType, T&>(
+ v.begin(), v.end());
+ },
+ keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */
+ );
+}
+
+// The case for special objects, like std::vector<bool>, that have to be returned-by-copy:
+template <typename Vector, typename Class_>
+void vector_accessor(enable_if_t<vector_needs_copy<Vector>::value, Class_> &cl) {
+ using T = typename Vector::value_type;
+ using SizeType = typename Vector::size_type;
+ using DiffType = typename Vector::difference_type;
+ using ItType = typename Vector::iterator;
+ cl.def("__getitem__",
+ [](const Vector &v, DiffType i) -> T {
+ if (i < 0 && (i += v.size()) < 0)
+ throw index_error();
+ if ((SizeType)i >= v.size())
+ throw index_error();
+ return v[(SizeType)i];
+ }
+ );
+
+ cl.def("__iter__",
+ [](Vector &v) {
+ return make_iterator<
+ return_value_policy::copy, ItType, ItType, T>(
+ v.begin(), v.end());
+ },
+ keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */
+ );
+}
+
+template <typename Vector, typename Class_> auto vector_if_insertion_operator(Class_ &cl, std::string const &name)
+ -> decltype(std::declval<std::ostream&>() << std::declval<typename Vector::value_type>(), void()) {
+ using size_type = typename Vector::size_type;
+
+ cl.def("__repr__",
+ [name](Vector &v) {
+ std::ostringstream s;
+ s << name << '[';
+ for (size_type i=0; i < v.size(); ++i) {
+ s << v[i];
+ if (i != v.size() - 1)
+ s << ", ";
+ }
+ s << ']';
+ return s.str();
+ },
+ "Return the canonical string representation of this list."
+ );
+}
+
+// Provide the buffer interface for vectors if we have data() and we have a format for it
+// GCC seems to have "void std::vector<bool>::data()" - doing SFINAE on the existence of data() is insufficient, we need to check it returns an appropriate pointer
+template <typename Vector, typename = void>
+struct vector_has_data_and_format : std::false_type {};
+template <typename Vector>
+struct vector_has_data_and_format<Vector, enable_if_t<std::is_same<decltype(format_descriptor<typename Vector::value_type>::format(), std::declval<Vector>().data()), typename Vector::value_type*>::value>> : std::true_type {};
+
+// Add the buffer interface to a vector
+template <typename Vector, typename Class_, typename... Args>
+enable_if_t<detail::any_of<std::is_same<Args, buffer_protocol>...>::value>
+vector_buffer(Class_& cl) {
+ using T = typename Vector::value_type;
+
+ static_assert(vector_has_data_and_format<Vector>::value, "There is not an appropriate format descriptor for this vector");
+
+ // numpy.h declares this for arbitrary types, but it may raise an exception and crash hard at runtime if PYBIND11_NUMPY_DTYPE hasn't been called, so check here
+ format_descriptor<T>::format();
+
+ cl.def_buffer([](Vector& v) -> buffer_info {
+ return buffer_info(v.data(), static_cast<ssize_t>(sizeof(T)), format_descriptor<T>::format(), 1, {v.size()}, {sizeof(T)});
+ });
+
+ cl.def(init([](buffer buf) {
+ auto info = buf.request();
+ if (info.ndim != 1 || info.strides[0] % static_cast<ssize_t>(sizeof(T)))
+ throw type_error("Only valid 1D buffers can be copied to a vector");
+ if (!detail::compare_buffer_info<T>::compare(info) || (ssize_t) sizeof(T) != info.itemsize)
+ throw type_error("Format mismatch (Python: " + info.format + " C++: " + format_descriptor<T>::format() + ")");
+
+ T *p = static_cast<T*>(info.ptr);
+ ssize_t step = info.strides[0] / static_cast<ssize_t>(sizeof(T));
+ T *end = p + info.shape[0] * step;
+ if (step == 1) {
+ return Vector(p, end);
+ }
+ else {
+ Vector vec;
+ vec.reserve((size_t) info.shape[0]);
+ for (; p != end; p += step)
+ vec.push_back(*p);
+ return vec;
+ }
+ }));
+
+ return;
+}
+
+template <typename Vector, typename Class_, typename... Args>
+enable_if_t<!detail::any_of<std::is_same<Args, buffer_protocol>...>::value> vector_buffer(Class_&) {}
+
+PYBIND11_NAMESPACE_END(detail)
+
+//
+// std::vector
+//
+template <typename Vector, typename holder_type = std::unique_ptr<Vector>, typename... Args>
+class_<Vector, holder_type> bind_vector(handle scope, std::string const &name, Args&&... args) {
+ using Class_ = class_<Vector, holder_type>;
+
+ // If the value_type is unregistered (e.g. a converting type) or is itself registered
+ // module-local then make the vector binding module-local as well:
+ using vtype = typename Vector::value_type;
+ auto vtype_info = detail::get_type_info(typeid(vtype));
+ bool local = !vtype_info || vtype_info->module_local;
+
+ Class_ cl(scope, name.c_str(), pybind11::module_local(local), std::forward<Args>(args)...);
+
+ // Declare the buffer interface if a buffer_protocol() is passed in
+ detail::vector_buffer<Vector, Class_, Args...>(cl);
+
+ cl.def(init<>());
+
+ // Register copy constructor (if possible)
+ detail::vector_if_copy_constructible<Vector, Class_>(cl);
+
+ // Register comparison-related operators and functions (if possible)
+ detail::vector_if_equal_operator<Vector, Class_>(cl);
+
+ // Register stream insertion operator (if possible)
+ detail::vector_if_insertion_operator<Vector, Class_>(cl, name);
+
+ // Modifiers require copyable vector value type
+ detail::vector_modifiers<Vector, Class_>(cl);
+
+ // Accessor and iterator; return by value if copyable, otherwise we return by ref + keep-alive
+ detail::vector_accessor<Vector, Class_>(cl);
+
+ cl.def("__bool__",
+ [](const Vector &v) -> bool {
+ return !v.empty();
+ },
+ "Check whether the list is nonempty"
+ );
+
+ cl.def("__len__", &Vector::size);
+
+
+
+
+#if 0
+ // C++ style functions deprecated, leaving it here as an example
+ cl.def(init<size_type>());
+
+ cl.def("resize",
+ (void (Vector::*) (size_type count)) & Vector::resize,
+ "changes the number of elements stored");
+
+ cl.def("erase",
+ [](Vector &v, SizeType i) {
+ if (i >= v.size())
+ throw index_error();
+ v.erase(v.begin() + i);
+ }, "erases element at index ``i``");
+
+ cl.def("empty", &Vector::empty, "checks whether the container is empty");
+ cl.def("size", &Vector::size, "returns the number of elements");
+ cl.def("push_back", (void (Vector::*)(const T&)) &Vector::push_back, "adds an element to the end");
+ cl.def("pop_back", &Vector::pop_back, "removes the last element");
+
+ cl.def("max_size", &Vector::max_size, "returns the maximum possible number of elements");
+ cl.def("reserve", &Vector::reserve, "reserves storage");
+ cl.def("capacity", &Vector::capacity, "returns the number of elements that can be held in currently allocated storage");
+ cl.def("shrink_to_fit", &Vector::shrink_to_fit, "reduces memory usage by freeing unused memory");
+
+ cl.def("clear", &Vector::clear, "clears the contents");
+ cl.def("swap", &Vector::swap, "swaps the contents");
+
+ cl.def("front", [](Vector &v) {
+ if (v.size()) return v.front();
+ else throw index_error();
+ }, "access the first element");
+
+ cl.def("back", [](Vector &v) {
+ if (v.size()) return v.back();
+ else throw index_error();
+ }, "access the last element ");
+
+#endif
+
+ return cl;
+}
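+// Usage sketch (illustrative names, assuming `namespace py = pybind11`):
+// binding a concrete vector type as an opaque Python class; the
+// PYBIND11_MAKE_OPAQUE macro keeps the stl.h caster from taking precedence
+// for this element type.
+//
+//     PYBIND11_MAKE_OPAQUE(std::vector<double>);
+//
+//     // inside the module definition:
+//     py::bind_vector<std::vector<double>>(m, "VectorDouble", py::buffer_protocol());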
+
+
+
+//
+// std::map, std::unordered_map
+//
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+/* Fallback functions */
+template <typename, typename, typename... Args> void map_if_insertion_operator(const Args &...) { }
+template <typename, typename, typename... Args> void map_assignment(const Args &...) { }
+
+// Map assignment when copy-assignable: just copy the value
+template <typename Map, typename Class_>
+void map_assignment(enable_if_t<is_copy_assignable<typename Map::mapped_type>::value, Class_> &cl) {
+ using KeyType = typename Map::key_type;
+ using MappedType = typename Map::mapped_type;
+
+ cl.def("__setitem__",
+ [](Map &m, const KeyType &k, const MappedType &v) {
+ auto it = m.find(k);
+ if (it != m.end()) it->second = v;
+ else m.emplace(k, v);
+ }
+ );
+}
+
+// Not copy-assignable, but still copy-constructible: we can update the value by erasing and reinserting
+template<typename Map, typename Class_>
+void map_assignment(enable_if_t<
+ !is_copy_assignable<typename Map::mapped_type>::value &&
+ is_copy_constructible<typename Map::mapped_type>::value,
+ Class_> &cl) {
+ using KeyType = typename Map::key_type;
+ using MappedType = typename Map::mapped_type;
+
+ cl.def("__setitem__",
+ [](Map &m, const KeyType &k, const MappedType &v) {
+ // We can't use m[k] = v; because value type might not be default constructible
+ auto r = m.emplace(k, v);
+ if (!r.second) {
+ // value type is not copy assignable so the only way to insert it is to erase it first...
+ m.erase(r.first);
+ m.emplace(k, v);
+ }
+ }
+ );
+}
+
+
+template <typename Map, typename Class_> auto map_if_insertion_operator(Class_ &cl, std::string const &name)
+-> decltype(std::declval<std::ostream&>() << std::declval<typename Map::key_type>() << std::declval<typename Map::mapped_type>(), void()) {
+
+ cl.def("__repr__",
+ [name](Map &m) {
+ std::ostringstream s;
+ s << name << '{';
+ bool f = false;
+ for (auto const &kv : m) {
+ if (f)
+ s << ", ";
+ s << kv.first << ": " << kv.second;
+ f = true;
+ }
+ s << '}';
+ return s.str();
+ },
+ "Return the canonical string representation of this map."
+ );
+}
+
+
+PYBIND11_NAMESPACE_END(detail)
+
+template <typename Map, typename holder_type = std::unique_ptr<Map>, typename... Args>
+class_<Map, holder_type> bind_map(handle scope, const std::string &name, Args&&... args) {
+ using KeyType = typename Map::key_type;
+ using MappedType = typename Map::mapped_type;
+ using Class_ = class_<Map, holder_type>;
+
+ // If either type is a non-module-local bound type then make the map binding non-local as well;
+ // otherwise (e.g. both types are either module-local or converting) the map will be
+ // module-local.
+ auto tinfo = detail::get_type_info(typeid(MappedType));
+ bool local = !tinfo || tinfo->module_local;
+ if (local) {
+ tinfo = detail::get_type_info(typeid(KeyType));
+ local = !tinfo || tinfo->module_local;
+ }
+
+ Class_ cl(scope, name.c_str(), pybind11::module_local(local), std::forward<Args>(args)...);
+
+ cl.def(init<>());
+
+ // Register stream insertion operator (if possible)
+ detail::map_if_insertion_operator<Map, Class_>(cl, name);
+
+ cl.def("__bool__",
+ [](const Map &m) -> bool { return !m.empty(); },
+ "Check whether the map is nonempty"
+ );
+
+ cl.def("__iter__",
+ [](Map &m) { return make_key_iterator(m.begin(), m.end()); },
+ keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */
+ );
+
+ cl.def("items",
+ [](Map &m) { return make_iterator(m.begin(), m.end()); },
+ keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */
+ );
+
+ cl.def("__getitem__",
+ [](Map &m, const KeyType &k) -> MappedType & {
+ auto it = m.find(k);
+ if (it == m.end())
+ throw key_error();
+ return it->second;
+ },
+ return_value_policy::reference_internal // ref + keepalive
+ );
+
+ cl.def("__contains__",
+ [](Map &m, const KeyType &k) -> bool {
+ auto it = m.find(k);
+ if (it == m.end())
+ return false;
+ return true;
+ }
+ );
+
+ // Assignment provided only if the type is copyable
+ detail::map_assignment<Map, Class_>(cl);
+
+ cl.def("__delitem__",
+ [](Map &m, const KeyType &k) {
+ auto it = m.find(k);
+ if (it == m.end())
+ throw key_error();
+ m.erase(it);
+ }
+ );
+
+ cl.def("__len__", &Map::size);
+
+ return cl;
+}
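+// Usage sketch (illustrative names, assuming `namespace py = pybind11`):
+//
+//     PYBIND11_MAKE_OPAQUE(std::map<std::string, double>);
+//
+//     // inside the module definition:
+//     py::bind_map<std::map<std::string, double>>(m, "MapStringDouble");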
+
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
--- /dev/null
+dune_python_add_test(NAME pythontests
+ SCRIPT pythontests.py
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+ LABELS quick)
+
+dune_add_test(SOURCES test_embed1.cc
+ LINK_LIBRARIES ${DUNE_LIBS} ${Python3_LIBRARIES}
+ LABELS quick
+ )
+
+add_executable(test_embed2 EXCLUDE_FROM_ALL test_embed2.cc)
+target_link_libraries(test_embed2 PUBLIC ${DUNE_LIBS} ${Python3_LIBRARIES})
+# the following should apparently work but doesn't: target_link_libraries(test_embed2 PRIVATE Python3::Python)
--- /dev/null
+from io import StringIO
+classACode="""
+struct MyClassA {
+ MyClassA(int a,int b) : a_(a), b_(b) {}
+ int a_,b_;
+};
+"""
+classBCode="""
+#include <cmath>
+template <class T> struct MyClassB {
+ MyClassB(T &t, int p) : a_(std::pow(t.a_,p)), b_(std::pow(t.b_,p)) {}
+ int a_,b_;
+};
+"""
+runCode="""
+template <class T> int run(T &t) {
+ return t.a_ * t.b_;
+}
+"""
+
+def test_class_export():
+ from dune.generator.importclass import load
+ from dune.generator.algorithm import run
+ from dune.generator import path
+ from dune.typeregistry import generateTypeName
+ cls = load("MyClassA",StringIO(classACode),10,20)
+ assert run("run",StringIO(runCode),cls) == 10*20
+ clsName,includes = generateTypeName("MyClassB",cls)
+ cls = load(clsName,StringIO(classBCode),cls,2)
+ assert run("run",StringIO(runCode),cls) == 10**2*20**2
+if __name__ == "__main__":
+ try:
+ from dune.common.module import get_dune_py_dir
+ _ = get_dune_py_dir()
+ test_class_export()
+ except ImportError:
+ pass
--- /dev/null
+#include <iostream>
+
+#include <dune/common/fvector.hh>
+#include <dune/python/common/fvector.hh>
+#include <dune/python/pybind11/embed.h>
+
+int main()
+{
+ /*
+ remark: getting the guard and loading dune.common could be combined
+ into a single 'initialization' function. The dune.common module can
+ also be used to register additional types, although a 'dummy' scope
+ would work as well, i.e.,
+ pybind11::handle scope;
+ */
+ pybind11::scoped_interpreter guard{};
+ pybind11::module dcommon = pybind11::module::import("dune.common");
+ auto global = pybind11::dict(pybind11::module::import("__main__").attr("__dict__"));
+
+ // using the C++ registry functions directly:
+ Dune::Python::registerFieldVector<double,2> ( dcommon );
+ /*
+ // instead of importing dune.common one could use the dummy scope and
+ // the first step is done in 'dune.common' (_common.cc)
+ Dune::Python::addToTypeRegistry<double>(Dune::Python::GenerateTypeName("double"));
+ // now we want to pass a FV<double,2> to Python so need to register that
+ Dune::Python::registerFieldVector<double,2> ( scope );
+ */
+
+ // first set of tests
+ {
+ // First example:
+ // Call a C++ function that generates a FV,
+ // call it from Python and compute the squared 2-norm
+ auto local = pybind11::dict();
+ local["call_test"] = pybind11::cpp_function([&]() -> auto
+ { return Dune::FieldVector<double,2>{4,2}; });
+ auto result = pybind11::eval<pybind11::eval_statements>(
+ "print('Example 1');\n"
+ "x = call_test();\n"
+ "norm2_x = x.two_norm2;\n"
+ "print('results',x);",
+ global, local
+ );
+ auto &x = local["x"].cast<Dune::FieldVector<double,2>&>();
+ auto norm2_x = local["norm2_x"].cast<double>();
+ if( !result.is( pybind11::none() )
+ || (x != Dune::FieldVector<double,2>{4,2})
+ || (norm2_x != 20) )
+ std::cout << "Test 1 failed" << std::endl;
+
+ // Second example:
+ // Call a C++ function passing in a FV reference and changing it.
+ // Note that the FV passed in is a reference to the 'x' generated in Example 1.
+ local["call_testref"] = pybind11::cpp_function([&]
+ (Dune::FieldVector<double,2>& y) -> auto
+ { y+=Dune::FieldVector<double,2>{-4,-2}; });
+ auto resultref = pybind11::eval<pybind11::eval_statements>(
+ "print('Example 2');\n"
+ "call_testref(x);\n"
+ "norm2_x = x.two_norm2;\n"
+ "print('result',x);",
+ global, local
+ );
+ norm2_x = local["norm2_x"].cast<double>();
+ if( !resultref.is( pybind11::none() )
+ || (x != Dune::FieldVector<double,2>{0,0})
+ || (norm2_x != 0) )
+ std::cout << "Test 2 failed" << std::endl;
+
+ // Third example:
+ // Construct a FV on C++ side and use that on Python side
+ // Note: local["z"]=z generates a copy so to retrieve the changed
+ // FV one needs to copy again using z = local["z"].cast<...>();
+ Dune::FieldVector<double,2> z{4,2};
+ local["call_testref2"] = pybind11::cpp_function([&]
+ (Dune::FieldVector<double,2>& y) -> auto
+ { y+=z; });
+ local["z"] = z;
+ auto resultref2 = pybind11::eval<pybind11::eval_statements>(
+ "print('Example 3');\n"
+ "import dune.common;\n"
+ "zz=dune.common.FieldVector((2,4));\n"
+ "call_testref2(zz);\n"
+ "print('results',zz,'using',z);"
+ "z *= 2\n"
+ "print('changed z to',z);",
+ global, local
+ );
+ z = local["z"].cast<Dune::FieldVector<double,2>>();
+ std::cout << "change of z on C++ side:" << z << std::endl;
+ auto &zz = local["zz"].cast<Dune::FieldVector<double,2>&>();
+ if( !resultref2.is( pybind11::none() )
+ || (zz != Dune::FieldVector<double,2>{6,6}) )
+ std::cout << "Test 3 failed" << std::endl;
+
+ // Example 4
+ // Can also use a pointer to a FV in the `local` dict so that
+ // changes on the Python side are available on the C++ side without a copy
+ auto newLocal = pybind11::dict(); // test with a new local dict
+ Dune::FieldVector<double,2> fv2{4,2};
+ newLocal["fv2"] = pybind11::cast(&fv2);
+ auto resultFVptr = pybind11::eval<pybind11::eval_statements>(
+ "print('Example 4');\n"
+ "print('changed fv from',fv2,end=' -> ')\n"
+ "fv2 *= 2\n"
+ "print(fv2);",
+ global, newLocal
+ );
+ std::cout << "C++ FV=" << fv2 << std::endl;
+ if( !resultFVptr.is( pybind11::none() )
+ || (fv2 != Dune::FieldVector<double,2>{8,4}) )
+ std::cout << "Test 4 failed" << std::endl;
+ }
+
+ // the final example uses the `FieldVector` function from the
+ // dune.common module - this approach requires JIT in dune-py and
+ // is turned off for the general embedding tests
+ if (false)
+ {
+ // Example 5
+ // Similar to example 3 but without copying similar to Example 4
+ auto pyfv = dcommon.attr("FieldVector")(std::vector<double>{4,2});
+ Dune::FieldVector<double,2>& fv = pyfv.cast<Dune::FieldVector<double,2>&>();
+ std::cout << "FV=" << pyfv << "==" << fv << std::endl;
+ auto newLocal = pybind11::dict();
+ newLocal["fv"] = pyfv;
+ auto resultFVa = pybind11::eval<pybind11::eval_statements>(
+ "print('Example 5a');\n"
+ "print('changed fv from',fv,end=' -> ')\n"
+ "fv *= 2\n"
+ "print(fv);",
+ global, newLocal
+ );
+ std::cout << "C++ FV=" << fv << std::endl;
+ if( !resultFVa.is( pybind11::none() )
+ || (fv != Dune::FieldVector<double,2>{8,4}) )
+ std::cout << "Test 5a failed" << std::endl;
+ auto resultFVb = pybind11::eval<pybind11::eval_statements>(
+ "print('Example 5b');\n"
+ "print('changed fv from',fv,end=' -> ')\n"
+ "fv *= 2\n"
+ "print(fv);",
+ global, newLocal
+ );
+ std::cout << "C++ FV=" << fv << std::endl;
+ if( !resultFVb.is( pybind11::none() )
+ || (fv != Dune::FieldVector<double,2>{16,8}) )
+ std::cout << "Test 5b failed" << std::endl;
+ }
+}
--- /dev/null
+#include <iostream>
+
+#include <dune/common/fvector.hh>
+#include <dune/python/common/fvector.hh>
+#include <dune/python/pybind11/embed.h>
+
+int main()
+{
+ pybind11::scoped_interpreter guard{};
+ auto global = pybind11::dict(pybind11::module::import("__main__").attr("__dict__"));
+ {
+ pybind11::module dcommon = pybind11::module::import("dune.common");
+ // the following not only returns a FV but, more importantly, registers
+ // the FV type (in contrast to the approach in 'test_embed1.cc' this
+ // triggers JIT compilation the first time it is called).
+ // Perhaps the use of 'std::vector' could be improved.
+ auto fv = dcommon.attr("FieldVector")(std::vector<double>{4,2});
+ std::cout << "FV=" << fv << std::endl;
+
+ // now execute Python code
+ auto local = pybind11::dict();
+ local["call_test"] = pybind11::cpp_function([&]() -> auto
+ { return Dune::FieldVector<double,2>{4,2}; });
+ auto result = pybind11::eval<pybind11::eval_statements>(
+ "print('Hello World!');\n"
+ "x = call_test();\n"
+ "norm2_x = x.two_norm2;\n"
+ "print(x);",
+ global, local
+ );
+ auto x = local["x"].cast<Dune::FieldVector<double,2>>();
+ if( !result.is( pybind11::none() ) || (x != Dune::FieldVector<double,2>{4,2}) )
+ std::cout << "Test 1 failed" << std::endl;
+ auto norm2_x = local["norm2_x"].cast<double>();
+ if( !result.is( pybind11::none() ) || (norm2_x != 20) )
+ std::cout << "Test 1 failed" << std::endl;
+ }
+}
--- /dev/null
+install(FILES dunemodules.lib
+ DESTINATION ${CMAKE_INSTALL_BINDIR}/../lib)
--- /dev/null
+# -*-sh-*-
+
+###############################################
+###
+### Configuration
+###
+
+# name of the "control" files
+CONTROL="dune.module"
+
+###############################################
+###
+### check for environment variables
+###
+
+if test -z "$GREP"; then
+ GREP=grep
+fi
+if test -z "$SED"; then
+ SED=sed
+fi
+# SunOS [e]grep does not seem to comprehend character classes. Set up
+# some variables to spell them out
+UPPER=ABCDEFGHIJKLMNOPQRSTUVWXYZ
+LOWER=abcdefghijklmnopqrstuvwxyz
+ALPHA="$UPPER$LOWER"
+DIGIT=0123456789
+ALNUM="$ALPHA$DIGIT"
+
+space=" "
+formfeed="\f"
+newline="
+"
+cr="
+"
+tab=" "
+vtab="\v"
+# $SPACE will unfortunately not work since grep will not accept an
+# embedded newline. Instead one can often get away with using $BLANK
+SPACE="$space$formfeed$newline$cr$tab$vtab"
+BLANK="$space$tab"
+NOBLANK="^$space$tab"
+
+#
+# read parameters from a $CONTROL file
+#
+# parameters:
+# $1 file to read
+#
+PARSER_TRIM="awk '{gsub(/^[ \\t]+|[ \\t]+$/,\"\");printf(\"%s\", \$0);}'"
+parse_control() {
+ # check file existence
+ if test ! -f "$1" -o "$(basename "$1")" != "$CONTROL"; then
+ echo "ERROR: '$1' is no $CONTROL file" >&2
+ exit 1
+ fi
+ # reset information handling
+ module=""
+ module_inst="no"
+ # read parameters from control file
+ local name="$($GREP Module: "$1" | cut -d ':' -f2 | eval $PARSER_TRIM)"
+ if test "x$name" = "x"; then
+ echo "ERROR: $CONTROL files $1 does not contain a Module entry" >&2
+ exit 1
+ fi
+ # create and check variable name from module name
+ export module=$(fix_variable_name $name)
+ if ! check_modname "$module"; then
+ echo "ERROR: $CONTROL files $1 contains an invalid Module entry" >&2
+ exit 1
+ fi
+ # don't include the generated `dune-py` module in the dependency list
+ if test "$module" = "dune_py" ; then
+ return
+ fi
+ # read dune.module file
+ local deps="$($GREP "^[BLANK]*Depends:" "$1" | cut -d ':' -f2 | eval $PARSER_TRIM)"
+ local sugs="$($GREP "^[BLANK]*Suggests:" "$1" | cut -d ':' -f2 | eval $PARSER_TRIM)"
+ local vers="$($GREP "^[BLANK]*Version:" "$1" | cut -d ':' -f2 | eval $PARSER_TRIM)"
+ local main="$($GREP "^[BLANK]*Maintainer:" "$1" | cut -d ':' -f2 | eval $PARSER_TRIM)"
+ # check whether the module is installed.
+ # - installed modules can be found via pkg-config
+ # - pkg-config --var=prefix should be the same as $path
+ #
+ # the path contains a different sub structure
+ # for installed and source modules
+ # - installed module: ${path}/lib/dunecontrol/${name}/dune.module
+ # and there is a file ${path}/lib/pkgconfig/${name}.pc
+ # - source module: ${path}/dune.module
+ # and there is a file ${path}/${name}.pc.in
+ local path="$(canonicalpath "$1")"
+ if pkg-config $name; then
+ local prefix="$(pkg-config --variable=prefix $name)"
+ local pkgpath=$(canonicalname "$prefix/lib/dunecontrol/$name")
+ if test x"$pkgpath" = x"$path"; then
+ path="$prefix"
+ module_inst="yes"
+ fi
+ fi
+ # avoid multiple definition of the same module
+ if eval test "x\$HAVE_$module" != "x"; then
+ # make sure we don't stumble over the same module twice
+ if eval test "\$PATH_$module" = "$path"; then
+ return
+ fi
+ local old_mod_inst
+ eval old_mod_inst=\$INST_$module
+ case "$old_mod_inst$module_inst" in
+ # multiple local modules are an error
+ # multiple installed modules are an error
+ nono|yesyes)
+ echo "ERROR: multiple definition of module $name" >&2
+ echo "previous defined in:" >&2
+ if eval test x\$INST_$module = "xyes"; then
+ echo " $(eval echo \$PATH_$module)/lib/dunecontrol/$name/$CONTROL" >&2
+ else
+ echo " $(eval echo \$PATH_$module)/$CONTROL" >&2
+ fi
+ echo "redefined in:" >&2
+ if test "$module_inst" = "yes"; then
+ echo " $path/lib/dunecontrol/$name/$CONTROL" >&2
+ else
+ echo " $path/$CONTROL" >&2
+ fi
+ exit 1
+ ;;
+ # installed modules are superseded by locally built modules
+ noyes)
+ return
+ ;;
+ # local modules supersede installed modules
+ yesno)
+ superseded_modules="$superseded_modules $path"
+ true # do nothing, ignore the previously found module
+ ;;
+ esac
+ fi
+ # set status variables
+ export HAVE_$module=yes
+ export PATH_$module="$path"
+ export VERS_$module="$vers"
+ export NAME_$module="$name"
+ export MAIN_$module="$main"
+ export DEPS_$module="$deps"
+ export INST_$module="$module_inst"
+ for name in $deps; do
+ mod=$(fix_variable_name $name)
+ export NAME_$mod="$name"
+ done
+ export SUGS_$module="$sugs"
+ for name in $sugs; do
+ mod=$(fix_variable_name $name)
+ export NAME_$mod="$name"
+ done
+ # update list of modules
+ if test "$module_inst" = "yes"; then
+ export INSTMODULES="$INSTMODULES$module "
+ else
+ export LOCALMODULES="$LOCALMODULES$module "
+ fi
+}
+
+# Uses the current compiler to extract information about the
+# multiarch triplet and sets the export variable MULTIARCH_LIBDIR
+# according to it.
+# If no compiler is specified then cc or gcc is used.
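+# Example (illustrative): on a Debian/Ubuntu x86_64 system this typically
+# results in MULTIARCH_LIBDIR="lib/x86_64-linux-gnu".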
+extract_multiarch_pkg_config_path(){
+ local my_cxx_compiler
+ if test "x$MULTIARCH_LIBDIR" != "x"; then
+ return
+ fi
+ set +e # errors in the multiarch detection should not be fatal.
+ $(which cc &>/dev/null)
+ if test $? -eq "0"; then
+ my_cxx_compiler=cc
+ else
+ my_cxx_compiler=gcc
+ fi
+ multiarch=$($my_cxx_compiler --print-multiarch 2>/dev/null)
+ if test $? -gt 0; then
+ for i in "target=" "Target:"; do
+ multiarch=$($my_cxx_compiler -v 2>&1| $GREP "$i" | sed "s/.*$i[$BLANK]*\([a-z0-9_-]*\)/\1/" | $SED "s/-[a-z]*-linux-gnu/-linux-gnu/")
+ if test -n "$multiarch"; then break; fi
+ done
+ fi
+ set -e # set to old value.
+ export MULTIARCH_LIBDIR="lib/$multiarch"
+
+ # create PKG_CONFIG_PATH for installed dune modules
+ for i in $MULTIARCH_LIBDIR lib64 lib32 lib; do
+ if test -d "$PREFIX_DIR/$i/pkgconfig"; then
+ export PKG_CONFIG_PATH="$PKG_CONFIG_PATH:$PREFIX_DIR/$i/pkgconfig"
+ fi
+ done
+}
+
+
+#
+# try to setup the control path
+#
+setup_control_path() {
+ if test -z "$DUNE_CONTROL_PATH"; then
+ DUNE_CONTROL_PATH=.
+ # try pkg-config locations
+ if ! pkg-config dune-common; then
+ # try usual locations of installed modules
+ for i in /usr/local/lib/dunecontrol/ /usr/lib/dunecontrol/; do
+ if test -d $i; then
+ DUNE_CONTROL_PATH=$DUNE_CONTROL_PATH:"$i"
+ fi
+ done
+ for i in `echo $PKG_CONFIG_PATH | tr ':' ' '`; do
+ if test -d "$i/../dunecontrol"; then
+ DUNE_CONTROL_PATH=$DUNE_CONTROL_PATH:"$i/../dunecontrol"
+ fi
+ done
+ else
+ NEW_DUNE_CONTROL_DIR=$(pkg-config dune-common --variable=prefix)/lib/dunecontrol
+ # There might be old versions of DUNE lying around that do not contain
+ # lib/dunecontrol yet. To prevent failure at a later stage of dunecontrol
+ # we check that the directory really exists.
+ if test -d "$NEW_DUNE_CONTROL_DIR"; then
+ DUNE_CONTROL_PATH=$DUNE_CONTROL_PATH:$NEW_DUNE_CONTROL_DIR
+ fi
+ fi
+ fi
+ # try to read DUNE_CONTROL_PATH from OPTS file
+ if test -n "$DUNE_OPTS_FILE"; then
+ DUNE_CONTROL_PATH="$(. $DUNE_OPTS_FILE; eval echo $DUNE_CONTROL_PATH)"
+ fi
+ # canonicalize path
+ local TMP=""
+ # foreach dir in $@
+ while read dir; do
+ if ! test -e "$dir"; then
+ echo "ERROR: The path \"$dir\" given in DUNE_CONTROL_PATH does not exist."
+ exit 1
+ fi
+ TMP=$TMP:"$(canonicalname "$dir")"
+ done <<EOF
+ $(echo $DUNE_CONTROL_PATH | sed -e 's/:\+/:/g' | tr ':' '\n')
+EOF
+ # sort+uniq path
+ DUNE_CONTROL_PATH="$(echo $TMP | tr ':' '\n' | sort -u | tr '\n' ':' | sed -e 's/^://' -e 's/:$//')"
+ # save result
+ export DUNE_CONTROL_PATH
+}
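+# Example (illustrative): DUNE_CONTROL_PATH is a colon-separated list of
+# directories (searched recursively for dune.module files below) and/or
+# individual dune.module files, e.g.
+#   DUNE_CONTROL_PATH=".:/usr/local/lib/dunecontrol:$HOME/src/dune-foo/dune.module"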
+
+#
+# search for modules in each directory in DUNE_CONTROL_PATH
+#
+find_modules_in_path() {
+ setup_control_path
+ if test -z "$FOUND_MODULES"; then
+ # foreach dir in $@
+ while read dir; do
+ if test -d "$dir"; then
+ while read m; do
+ test -n "$m" && parse_control "$m"
+ done <<EOFM
+ $(find -H "$dir" -name $CONTROL | $GREP -v 'dune-[-_a-zA-Z]/dune-[-a-zA-Z_]*-[0-9]\{1,\}.[0-9]\{1,\}/')
+EOFM
+ else
+ parse_control "$dir"
+ fi
+ done <<EOF
+ $(echo $DUNE_CONTROL_PATH | sed -e 's/:\+/:/g' | tr ':' '\n')
+EOF
+ export MODULES="$LOCALMODULES$INSTMODULES"
+ export FOUND_MODULES="$MODULES"
+ else
+ export MODULES="$FOUND_MODULES"
+ fi
+}
+
+#
+# sort $MODULES according to the dependencies
+#
+sort_modules() {
+ # reset lists
+ export SORTEDMODULES=""
+ export REVERSEMODULES=""
+ export SORTEDMODULES_SUB=""
+ # handle each module passed as a parameter
+ for m in "$@"; do
+ # did we find a module file for this module?
+ if eval test x\$HAVE_$m != x; then
+ _sort_module $m MAIN
+ else
+ echo "ERROR: could not find module $(eval echo \$NAME_$m)" >&2
+ exit 1
+ fi
+ done
+ # save result
+ export MODULES="$SORTEDMODULES"
+ # setup list of SUGS/DEPS and the INFO list
+ export SORTEDMODULES_INFO=""
+ export SORTEDMODULES_DEPS=""
+ export SORTEDMODULES_MAIN=""
+ export SORTEDMODULES_SUGS=""
+ local mode
+ for m in $MODULES; do
+ eval mode=\$MODE_$m
+ SORTEDMODULES_INFO="$SORTEDMODULES_INFO $m[$mode]"
+ eval SORTEDMODULES_$mode=\"\$SORTEDMODULES_$mode $m\"
+ done
+ export SORTEDMODULES_INFO
+ export SORTEDMODULES_DEPS
+ export SORTEDMODULES_SUGS
+ export SORTEDMODULES_MAIN
+ # clean up temporary variables
+ for m in $MODULES; do
+ export MODE_$m=""
+ export SORT_DONE_$m=""
+ export SORT_DEPS_DONE_$m=""
+ export SORT_SUGS_DONE_$m=""
+ done
+}
+
+# strip any leading spaces, tabs, or newlines
+_ltrim()
+{
+ local val="$1"
+ local tab=" "
+ local nl="
+"
+ while :; do
+ case $val in
+ " "*|"$tab"*|"$nl"*)
+ val=${val#?}
+ ;;
+ *)
+ break
+ ;;
+ esac
+ done
+ # if this is used inside `...` or $(...), any trailing newlines will be
+ # stripped, no matter what we do. Setting a variable to the result would
+ # work around this, but is not a common idiom.
+ printf "%s" "$val"
+}
+
+_check_deps()
+{
+ local module="$1"
+ local mode="$2"
+ local depmode="$3"
+ local report="ERROR"
+ local requires="requires"
+ local required="required"
+ local dependency="dependency"
+ if test "x$mode" = "xSUGS"; then
+ report="WARNING"
+ requires="suggests"
+ required="suggested"
+ dependency="suggestion"
+ fi
+ eval deps=\$${mode}_$module
+ #initially remove leading space
+ deps=$(_ltrim "$deps")
+ while test -n "$deps"; do
+ #the end of the name is marked either by space or opening parenthesis
+ name="${deps%%[ (]*}"
+ #remove the name and adjacent whitespace
+ deps=$(_ltrim "${deps#"$name"}")
+ #check whether there is a dependency version
+ case "$deps" in
+ '('*) deps="${deps#(}"
+ depver="${deps%%)*}"
+ deps="${deps#*)}"
+ ;;
+ *) depver=
+ ;;
+ esac
+ #remove any leading whitespace for the next iteration
+ deps=$(_ltrim "$deps")
+
+ dep=$(fix_variable_name $name)
+ if ! check_modname $dep; then
+ echo "ERROR: invalid module name '$name' ($dependency of '$module')" >&2
+ exit 1
+ fi
+ if eval test x\$HAVE_$dep != "x"; then
+ eval ver=\$VERS_$dep
+ if test "$SKIPVERSIONCHECK" != "yes" && ! check_version "$ver" "$depver"; then
+ echo "$report: version mismatch." >&2
+ echo " $modname $requires '$name $depver'," >&2
+ echo " but only '$name' = '$ver' is available." >&2
+ if test "x$mode" = "xDEPS"; then
+ exit 1
+ else
+ echo "Skipping '$name'!" >&2
+ continue
+ fi
+ fi
+ _sort_module $dep $depmode
+ else
+ # perhaps this module is installed,
+ # then it should be handled via pkg-config
+ if ! pkg-config $name; then
+ echo "$report: could not find module '$name'," >&2
+ echo " module is also unknown to pkg-config." >&2
+ echo " Maybe you need to adjust PKG_CONFIG_PATH!" >&2
+ echo " '$name' is $required by $modname" >&2
+ if test "x$mode" = "xDEPS"; then
+ exit 1
+ else
+ echo "Skipping '$name'!" >&2
+ continue
+ fi
+ else
+ eval ver=$(pkg-config $name --modversion)
+ if test "$SKIPVERSIONCHECK" != "yes" && ! check_version "$ver" "$depver"; then
+ echo "$report: version mismatch." >&2
+ echo " $modname $requires '$name $depver'," >&2
+ echo " but only '$name' = '$ver' is installed." >&2
+ if test "x$mode" = "xDEPS"; then
+ exit 1
+ else
+ echo "Skipping '$name'!" >&2
+ continue
+ fi
+ fi
+ # update module list
+ parse_control $(pkg-config $name --variable=prefix)/lib/dunecontrol/$name/dune.module
+ _sort_module $dep $depmode
+ fi
+ fi
+ done
+}
+
+#
+# recursive part of sort_modules
+# evaluate dependencies of one module
+#
+# parameters:
+# $1 name of the module
+# $2 parser mode:
+# DEPS: search for dependencies
+# SUGS: search for suggestions (DEPS of SUGS are handled as SUGS)
+# MAIN: primary invocation of a DEPS search,
+# MAIN modules are not added to the list of DEPS/SUGS
+#
+_sort_module() {
+ local module="$1"
+ local mode="$2"
+ test -n "$mode"
+ local modname=""
+ eval modname=\$NAME_$module
+ local deps=""
+ local name=""
+ local dep=""
+ local ver=""
+ local depver=""
+ shift 1
+ if ! check_modname $module; then
+ echo "ERROR: invalid module name $module" >&2
+ exit 1
+ fi
+ depmode=$(test $mode = SUGS && echo SUGS || echo DEPS)
+ if eval test "x\$SORT_${depmode}_DONE_$module" != "xyes"; then
+ # stop any recursion
+ export SORT_${depmode}_DONE_$module=yes
+ # resolve dependencies
+ _check_deps $module DEPS $depmode # it might happen that the DEPS are actually SUGS
+ # resolve suggestions
+ _check_deps $module SUGS SUGS
+ # remember mode of the module
+ if eval test "x\$MODE_$module" = xSUGS -o "x\$MODE_$module" = x; then
+ export MODE_$module=$mode
+ fi
+ # topological list of the module and its dependencies/suggestions
+ if eval test "x\$SORT_DONE_$module" != "xyes"; then
+ export SORT_DONE_$module=yes
+ export SORTEDMODULES="$SORTEDMODULES $module"
+ export REVERSEMODULES="$module $REVERSEMODULES"
+ fi
+ fi
+}
+
+#
+# load the $CONTROL file, skip all control variables
+# and run a command
+#
+# parameters:
+# $1 command to execute
+# $2 full path of the $CONTROL file
+#
+eval_control() {
+ local command="$1"
+ local file="$2"
+ shift 2
+ if test -f "$file"; then
+ # open subshell
+ (
+ set -e
+ # load functions defined in $file
+ # if $command is not defined in $file,
+ # then the default implementation will be executed
+ eval "$($GREP -v "^[-$ALNUM]\{1,\}:" "$file")"
+ # execute $command
+ $command
+ ) || false
+ else
+ echo "ERROR: could not find $file" >&2
+ exit 1
+ fi
+}
+
+#
+# fix a value such that it is suitable for a variable name
+#
+# parameters:
+# $1 value
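+# (e.g. "dune-common" becomes "dune_common")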
+#
+fix_variable_name() {
+ echo ${@//[[:punct:]]/_}
+}
+
+#
+# fix a value such that it is suitable for a variable name and assign it
+#
+# parameters:
+# $1 name of variable
+# $2 value
+#
+fix_and_assign() {
+ local name="$1"
+ if ! check_modname $name; then
+ echo "ERROR: error in assignment. $name is not a valid variabel name." >&2
+ fi
+ shift 1
+ export $name=$(fix_variable_name $@)
+}
+
+#
+# make sure the module name fits the naming convention
+# (we try to assign the name and report upon failure)
+#
+# parameters:
+# $1 module name
+#
+check_modname() {
+ # magic pattern match, see http://www.linuxmisc.com/9-unix-questions/67d307ca51f16ed4.htm
+ [ -n "${1##*[!A-Za-z0-9_]*}" ] && [ -n "${1##[!A-Za-z_]*}" ]
+}
+
+#
+# compare a sub part of the version string
+#
+# parameters:
+# $1 version
+# $2 part
+#
+# parts:
+# 1: major
+# 2: minor
+# 3: revision
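+# (e.g. get_sub_version 2.8.1 2 prints 8)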
+#
+get_sub_version() {
+ #it would be nice to give the part to awk via a "-v FIELD=$2"
+ #command line argument. Unfortunately, SunOS does not support this.
+ #Worse, we cannot use awks int() function here, since under SunOS it
+ #will always return 0 for string input.
+ echo $1 | cut -d. -f"$2" | sed 's/[^0-9].*$//;s/^$/0/'
+}
+
+#
+# compare two versions
+#
+# parameters:
+# $1 version1
+# $2 version2
+#
+# prints (on stdout):
+# eq: v1 == v2
+# gt: v1 > v2
+# lt: v1 < v2
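+# (e.g. compare_versions 2.7.1 2.8.0 prints "lt")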
+#
+compare_versions() {
+ local v1="$1"
+ local v2="$2"
+
+ for i in 1 2 3; do
+ compare_sub_version $v1 $v2 $i || return 0
+ done
+
+ echo "eq"
+}
+
+compare_sub_version() {
+ # compare sub version number
+ local sub1=`get_sub_version $1 $3`
+ local sub2=`get_sub_version $2 $3`
+
+ if test $sub1 -gt $sub2; then
+ echo "gt"
+ return 1
+ fi
+ if test $sub1 -lt $sub2; then
+ echo "lt"
+ return 1
+ fi
+
+ return 0
+}
+
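+#
+# check a version against a version constraint
+#
+# parameters:
+# $1 version
+# $2 version constraint (e.g. ">= 2.7" or ">= 2.7 && < 2.9");
+# an empty constraint always matches
+# (exits with an error message for malformed constraints)
+#
+# return:
+# 0: version satisfies the constraint
+# 1: version does not satisfy the constraint
+#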
+check_version() {
+ if test -z "$2"; then # if no constraint is given, check_version is true
+ return 0
+ fi
+ local v=$1
+ local PATTERN="^ *\([<>=]*\) *\([0-9.]*\)\(.*\)$"
+ if test x != `echo "$2" | sed -e "s/$PATTERN/x/"`; then
+ echo "ERROR: invalid version constraint $2" >&2
+ exit 1
+ fi
+ local op=`echo "$2" | sed -e "s/$PATTERN/\1/"`
+ local v2=`echo "$2" | sed -e "s/$PATTERN/\2/"`
+ local rest=`echo "$2" | sed -e "s/$PATTERN/\3/" -e 's/ //g'`
+ local result=1
+
+ local rel=`compare_versions $v $v2`
+
+ case $rel$op in
+ "eq<="|"eq="|"eq>="|\
+ "gt>="|"gt>"|\
+ "lt<="|"lt<")
+ result=0
+ ;;
+ esac
+ if test -z "$rest"; then
+ return $result
+ fi
+ PATTERN="\([|&]\{2\}\)\(.*\)$"
+ if test xx != x`echo "$rest" | sed -e "s/$PATTERN/x/"`; then
+ echo "ERROR: invalid version constraint '$rest'" >&2
+ exit 1
+ fi
+ op=`echo "$rest" | sed -e "s/$PATTERN/\1/"`
+ v2=`echo "$rest" | sed -e "s/$PATTERN/\2/"`
+ if eval "test $result -eq 0" $op "check_version \"$v\" \"$v2\""; then
+ return 0
+ fi
+ return 1
+}
--- /dev/null
+[build-system]
+requires = ['setuptools', 'wheel', 'scikit-build', 'cmake', 'ninja', 'requests', 'portalocker', 'numpy', 'mpi4py']
+build-backend = 'setuptools.build_meta'
--- /dev/null
+add_subdirectory(dune)
+
+configure_file(setup.py.in setup.py)
+configure_file(pyproject.toml.in pyproject.toml)
--- /dev/null
+add_subdirectory(common)
+add_subdirectory(generator)
+add_subdirectory(typeregistry)
+
+add_python_targets(dune
+ __init__
+ __main__
+ create
+ packagemetadata
+ plotting
+ deprecate
+ utility
+)
--- /dev/null
+__import__('pkg_resources').declare_namespace(__name__)
--- /dev/null
+import sys, os
+from argparse import Action, ArgumentParser
+from .packagemetadata import get_dune_py_dir
+
+def configure():
+ # force a reconfiguration of dune-py by deleting tagfile
+ tagfile = os.path.join(get_dune_py_dir(), ".noconfigure")
+ if os.path.exists(tagfile):
+ os.remove(tagfile)
+
+
+if __name__ == '__main__':
+ parser = ArgumentParser(description='Execute DUNE commands', prog='dune')
+ parser.add_argument('command', choices=['configure'], help="Command to be executed")
+
+ args = parser.parse_args()
+
+ if args.command == 'configure':
+ print('Configure dune-py module')
+ configure()
--- /dev/null
+add_python_targets(common
+ __init__
+ checkconfiguration
+ compatibility
+ deprecated # deprecated 2.8
+ hashit
+ locking
+ module
+ pickle # deprecated 2.8
+ project
+ utility
+)
+dune_add_pybind11_module(NAME _common)
+set_property(TARGET _common PROPERTY LINK_LIBRARIES dunecommon APPEND)
+install(TARGETS _common LIBRARY DESTINATION python/dune/common)
--- /dev/null
+import logging
+import os
+
+logger = logging.getLogger(__name__)
+
+loglevel = logging.INFO
+try:
+ loglevel = getattr(logging, os.environ['DUNE_LOG_LEVEL'].upper())
+except KeyError:
+ pass
+except AttributeError:
+ logger.warning('Invalid log level in environment variable DUNE_LOG_LEVEL')
+
+logformat = os.environ.get('DUNE_LOG_FORMAT', 'DUNE-%(levelname)s: %(message)s')
+
+logging.basicConfig(format=logformat, level=loglevel)
+
+try:
+ from mpi4py import MPI
+ if MPI.COMM_WORLD.Get_rank() == 0:
+ logger.debug('MPI initialized successfully')
+except ImportError:
+ logger.debug('mpi4py not found, MPI not initialized')
+
+from ._common import *
+from .deprecated import DeprecatedObject
+
+import numpy
+def fvgetitem(self,index):
+ try:
+ return self._getitem(index)
+ except TypeError:
+ return numpy.array(self,copy=False)[index]
+finished = False
+nr = 1
+while not finished:
+ try:
+ cls = globals()["FieldVector_"+str(nr)]
+ setattr(cls, "_getitem", cls.__getitem__)
+ setattr(cls, "__getitem__", fvgetitem)
+ nr += 1
+ except KeyError:
+ finished = True
+
+def loadvec(includes, typeName, constructors=None, methods=None):
+ from dune.generator.generator import SimpleGenerator
+ from dune.common.hashit import hashIt
+ generatorvec = SimpleGenerator("FieldVector", "Dune::Python")
+ includes = includes + ["dune/python/common/fvector.hh"]
+ typeHash = "fieldvector_" + hashIt(typeName)
+ return generatorvec.load(includes, typeName, typeHash,
+ constructors, methods, bufferProtocol=True)
+def FieldVector(values):
+ values = list(values)
+ fv = "FieldVector_" + str(len(values))
+ try:
+ return globals()[fv](values)
+ except KeyError:
+ typeName = "Dune::FieldVector< double ," + str(len(values)) + " >"
+ includes = []
+ cls = loadvec(includes, typeName).FieldVector
+ setattr(cls, "_getitem", cls.__getitem__)
+ setattr(cls, "__getitem__", fvgetitem)
+ globals().update({fv:cls})
+ return globals()[fv](values)
+
+# implementation needs to be completed similar to the FV above
+# def FieldMatrix(values):
+# fm = "FieldMatrix_" + str(len(values)) + "_" + str(len(values[0]))
+# return globals()[fm](values)
+
+def _raise(exception):
+ raise exception
--- /dev/null
+// -*- tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include <config.h>
+
+#include <utility>
+
+#include <dune/common/hybridutilities.hh>
+#include <dune/python/common/dynmatrix.hh>
+#include <dune/python/common/dynvector.hh>
+#include <dune/python/common/fmatrix.hh>
+#include <dune/python/common/fvector.hh>
+#include <dune/python/common/mpihelper.hh>
+
+#include <dune/python/pybind11/pybind11.h>
+#include <dune/python/pybind11/stl.h>
+
+PYBIND11_MODULE( _common, module )
+{
+ Dune::Python::addToTypeRegistry<double>(Dune::Python::GenerateTypeName("double"));
+ Dune::Python::addToTypeRegistry<int>(Dune::Python::GenerateTypeName("int"));
+ Dune::Python::addToTypeRegistry<std::size_t>(Dune::Python::GenerateTypeName("std::size_t"));
+
+ Dune::Python::registerDynamicVector<double>(module);
+ Dune::Python::registerDynamicMatrix<double>(module);
+
+ int argc = 0;
+ char **argv = NULL;
+ Dune::MPIHelper::instance(argc,argv);
+ Dune::Python::registerCollectiveCommunication(module);
+}
--- /dev/null
+import logging
+import os
+import re
+import subprocess
+
+import dune.common.module
+
+from dune.common.utility import buffer_to_str
+from dune.generator import builder, ConfigurationError
+
+logger = logging.getLogger(__name__)
+
+def assertHave(identifier):
+ '''check if an identifier is defined equal to 1 in the dune-py config.h file.
+ Use this to check whether, for example, #define HAVE_DUNE_COMMON 1 is
+ provided in the config file by calling
+ assertHave("HAVE_DUNE_COMMON")
+ '''
+ config = os.path.join(dune.common.module.get_dune_py_dir(), "config.h")
+
+ matches = [match for match in [re.match('^[ ]*#define[ ]+' + identifier.strip() + '[ ]+1$', line) for line in open(config)] if match is not None]
+ if not matches:
+ matches = [match for match in [re.match('^[ ]*#define[ ]+' + identifier.strip() + '[ ]+ENABLE', line) for line in open(config)] if match is not None]
+
+ if not matches:
+ raise ConfigurationError(identifier + " is not set in dune-py's config.h")
+ elif len(matches) > 1:
+ raise ConfigurationError(identifier + " found multiple times in dune-py's config.h")
+
+def assertCMakeVariable(identifier,value,defaultFails):
+ '''check if a variable in CMakeCache.txt in dune-py is defined and equal to 'value'
+ '''
+ cache = os.path.join(dune.common.module.get_dune_py_dir(), "CMakeCache.txt")
+
+ identifier = identifier.lower().strip()
+ matches = [line.lower() for line in open(cache) if re.match('^[ ]*'+identifier+':+', line.lower()) is not None]
+ if not matches and defaultFails:
+ raise ConfigurationError(identifier + " default behavior is used in dune-py and that is not allowed")
+ elif len(matches) > 1:
+ raise ConfigurationError(identifier + " found multiple times in dune-py's config.h")
+ elif matches:
+ # check for bool on/off type variables:
+ bools = {True:["on","true","1"], False:["off","false","0"]}
+ if not [x for x in bools[value] if x in matches[0]]:
+ raise ConfigurationError(identifier + " dune-py wrongly configured wrt "+identifier)
+
+def preprocessorAssert(tests):
+ '''perform preprocessor checks.
+ A list of checks can be provided; each should contain a pair, the
+ first being the preprocessor check to perform (e.g. #if or #ifdef)
+ and the second being the message to print if the check fails. The
+ generated code is of the form:
+ tests[i][0]
+ #else
+ test failed
+ #endif
+ so the first argument of each test has to make this piece of code
+ valid C++ code assuming config.h is included.
+ '''
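+ # hypothetical example:
+ # preprocessorAssert([("#if HAVE_MPI", "this module requires MPI")])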
+ source = "#include <config.h>\nint main() {\n"
+ i = 0
+ for t in tests:
+ source = source + t[0]+"\n#else\nreturn "+str(i+1)+";\n#endif\n"
+ i += 1
+ source = source + "return 0;\n}\n"
+
+ with open(os.path.join(builder.generated_dir, "generated_module.hh"), 'w') as out:
+ out.write(source)
+ builder.compile("generated_test")
+
+ test_args = ["./generated_test"]
+ test = subprocess.Popen(test_args, cwd=builder.generated_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = test.communicate()
+ logger.debug(buffer_to_str(stdout))
+ returncode = test.returncode
+
+ if returncode > 0:
+ logger.debug("failed testing:\n"+source)
+ logger.critical("checking "+tests[returncode-1][0]+" failed: "+tests[returncode-1][1])
+ raise ConfigurationError(tests[returncode-1][1])
--- /dev/null
+print("""WARNING:
+Importing deprecated `dune.common.compatibility'. The Python 2.7 versions
+of the functions defined here were removed and the Python 3 versions were
+moved, so use
+ import dune.common.utility
+instead.
+""")
+from .utility import *
--- /dev/null
+import logging
+
+class DeprecatedObject(object):
+ def __init__(self, real, message):
+ self.real = real
+ self.logger = logging.getLogger(real.__module__)
+ self.message = message
+
+ def __call__(self, *args, **kwargs):
+ object.__getattribute__(self, "logger").warning(object.__getattribute__(self, "message"))
+ return object.__getattribute__(self, "real")(*args, **kwargs)
+
+ def __getattribute__(self, name):
+ object.__getattribute__(self, "logger").warning(object.__getattribute__(self, "message"))
+ return getattr(object.__getattribute__(self, "real"), name)
+
+ def __repr__(self):
+ object.__getattribute__(self, "logger").warning(object.__getattribute__(self, "message"))
+ return repr(object.__getattribute__(self, "real"))
--- /dev/null
+import hashlib
+
+def hashIt(typeName):
+ if hasattr(typeName, '__iter__'):
+ return hashlib.md5("".join(t for t in typeName).encode('utf-8')).hexdigest()
+ else:
+ return hashlib.md5(typeName.encode('utf-8')).hexdigest()
--- /dev/null
+try:
+ from portalocker import Lock as _Lock
+ from portalocker.constants import LOCK_EX, LOCK_SH
+ class Lock(_Lock):
+ def __init__(self, path, flags, *args, **kwargs):
+ _Lock.__init__(self,path,*args,flags=flags,timeout=None,**kwargs)
+except ModuleNotFoundError:
+ import os
+ import fcntl
+ from fcntl import LOCK_EX, LOCK_SH
+ # file locking from fcntl
+ def lock_file(f, cmd=fcntl.LOCK_EX):
+ fcntl.flock(f, cmd)
+ return f
+ def unlock_file(f):
+ fcntl.flock(f, fcntl.LOCK_UN)
+ return f
+
+ # This file opener *must* be used in a "with" block.
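+ # e.g.: with Lock("dune-py.lock", flags=LOCK_EX) as f: ...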
+ class Lock:
+ # Open the file with arguments provided by user. Then acquire
+ # a lock on that file object (WARNING: Advisory locking).
+ def __init__(self, path, flags, *args, **kwargs):
+ # Open the file and acquire a lock on the file before operating
+ self.file = open(path, mode='w+', *args, **kwargs)
+ # Lock the opened file
+ self.file = lock_file(self.file, flags) # flags are either LOCK_EX or LOCK_SH
+
+ # Return the opened file object (knowing a lock has been obtained).
+ def __enter__(self, *args, **kwargs): return self.file
+
+ # Unlock the file and close the file object.
+ def __exit__(self, exc_type=None, exc_value=None, traceback=None):
+ # Flush to make sure all buffered contents are written to file.
+ self.file.flush()
+ os.fsync(self.file.fileno())
+ # Release the lock on the file.
+ self.file = unlock_file(self.file)
+ self.file.close()
+ # Handle exceptions that may have come up during execution, by
+ # default any exceptions are raised to the user.
+ return exc_type is None
--- /dev/null
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import email.utils
+import io
+import logging
+import os
+import re
+import shlex
+import string
+import subprocess
+import sys
+
+from os.path import expanduser
+
+if __name__ == "dune.common.module":
+ from dune.common.utility import buffer_to_str
+ from dune.common import project
+ from dune.packagemetadata import Version, VersionRequirement,\
+ Description, cmakeFlags, cmakeArguments, inVEnv, get_dune_py_dir
+else:
+ from utility import buffer_to_str
+ import project
+ from packagemetadata import Version, VersionRequirement,\
+ Description, cmakeFlags, cmakeArguments, inVEnv, get_dune_py_dir
+
+logger = logging.getLogger(__name__)
+
+
+def find_modules(path):
+ """find DUNE modules in given path
+
+ Args:
+ path: Iterable containing directories to search modules in
+
+ Returns:
+ List of (description, dir) pairs of found modules.
+ """
+ modules = []
+ for dir in path:
+ for root, dirs, files in os.walk(dir):
+ if 'dune.module' in files:
+ description = Description(os.path.join(root, 'dune.module'))
+ if not description.name == "dune-py":
+ modules.append((description,os.path.abspath(root)))
+ # do not traverse subdirectories
+ # del dirs[:]
+ return modules
+
+
+def resolve_dependencies(modules, module=None, deps=None):
+ """resolve module dependencies
+
+ Args:
+ modules: dictionary mapping module name to description
+ module (optional): name or description of module to resolve dependencies for
+ deps (optional): dictionary mapping module name to an unordered set of its
+ dependency names. This dictionary is extended.
+
+ Return:
+ If module is given, an unordered set of its dependency names is returned.
+ Otherwise a dictionary mapping module name to such a set is returned (i.e., deps).
+ """
+ if deps is None:
+ deps = dict()
+
+ if module is None:
+ for m in modules:
+ if m not in deps:
+ resolve_dependencies(modules, m, deps)
+ return deps
+
+ if not isinstance(module, Description):
+ module = modules[module]
+
+ def resolve(desc, req):
+ if not req(desc.version):
+ raise ValueError('Module \'' + module.name + '\' requires ' + desc.name + ' ' + str(req) + '.')
+ try:
+ d = deps[desc.name]
+ if d is None:
+ raise ValueError('Module \'' + module.name + '\' has circular dependency on ' + desc.name + '.')
+ return d | {desc.name}
+ except KeyError:
+ return resolve_dependencies(modules, desc, deps) | {desc.name}
+
+ deps[module.name] = None
+ mod_deps = set()
+ for m, r in module.depends:
+ try:
+ mod_deps |= resolve(modules[m], r)
+ except KeyError:
+ raise ValueError('Module \'' + module.name + '\' has missing dependency \'' + m + '\'.')
+ for m, r in module.suggests:
+ try:
+ mod_deps |= resolve(modules[m], r)
+ except KeyError:
+ pass
+ deps[module.name] = mod_deps
+ return mod_deps
+
+
+def resolve_order(deps):
+ """resolve module dependencies
+
+ Args:
+ deps: dictionary mapping module name to its dependency names
+
+ Return:
+ Ordered list of module names such that each module only depends on
+ modules preceding it.
+ """
+ order = []
+
+ def resolve(m):
+ if m not in order:
+ for d in deps[m]:
+ if d not in order:
+ resolve(d)
+ order.append(m)
+
+ for m in deps:
+ resolve(m)
+ return order
+
+
+def pkg_config(pkg, var=None, paths=[]):
+ args = ['pkg-config', pkg]
+ if var is not None:
+ args += ['--variable=' + var]
+ env = dict(os.environ)
+ env.update({'PKG_CONFIG_PATH': ':'.join(paths)})
+ pkgconfig = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
+ pkgconfig.wait()
+ prefix = pkgconfig.stdout.read()
+ if pkgconfig.returncode != 0:
+ raise KeyError('package ' + pkg + ' not found.')
+ return buffer_to_str(prefix).strip()
+
+
+def get_prefix(module):
+ paths = get_module_path("pkgconfig")
+ return pkg_config(module, var='prefix', paths=paths)
+
+
+def is_installed(dir, module=None):
+ """check whether a path contains an installed or a source version of a DUNE module
+
+ Args:
+ dir: directory containing the module description file (dune.module)
+ module (optional): name of the module (either str or Description)
+ If omitted, the module description file is parsed for it.
+
+ Returns:
+ True if the module is installed, False otherwise
+ """
+ if module is None:
+ module = Description(os.path.join(dir, 'dune.module'))
+ if isinstance(module, Description):
+ module = module.name
+ try:
+ prefix = get_prefix(module)
+ except KeyError:
+ return False
+ for l in ['lib','lib32','lib64']:
+ if os.path.realpath(dir) == os.path.realpath(os.path.join(prefix, l, 'dunecontrol', module)):
+ return True
+ return False
+
+def get_cmake_command():
+ try:
+ return os.environ['DUNE_CMAKE']
+ except KeyError:
+ return 'cmake'
+
+def get_local():
+ if inVEnv():
+ return sys.prefix
+ try:
+ home = expanduser("~")
+ return os.path.join(home, '.local')
+ except KeyError:
+ pass
+ return ''
+
+def get_module_path(post="dunecontrol"):
+ path = ['.']
+ # try to guess modules path for unix systems
+ for l in ['lib','lib32','lib64']:
+ path = path + [p for p in [
+ os.path.join('usr','local',l,post),
+ os.path.join('usr',l,post),
+ os.path.join(get_local(),l,post),
+ ]
+ if os.path.isdir(p)]
+ try:
+ path = path + [p for p in os.environ['DUNE_CONTROL_PATH'].split(':') if p and os.path.isdir(p)]
+ if post == 'dunecontrol':
+ logger.debug('Module path [DUNE_CONTROL_PATH]: ' + ':'.join(path))
+ except KeyError:
+ pass
+
+ try:
+ pkg_config_path = [p for p in os.environ['PKG_CONFIG_PATH'].split(':') if p and os.path.isdir(p)]
+ if post == 'dunecontrol':
+ pkg_config_path = [os.path.join(p, '..', post) for p in pkg_config_path]
+ path = path + [p for p in pkg_config_path if os.path.isdir(p)]
+ except KeyError:
+ pass
+ # try to guess module path using pkg-config
+ try:
+ prefix = pkg_config('dune-common', 'prefix').strip()
+ path = path + [p for p in [ os.path.join(prefix, 'lib', post)] if os.path.isdir(p)]
+ path = path + [p for p in [ os.path.join(prefix, 'lib32', post)] if os.path.isdir(p)]
+ path = path + [p for p in [ os.path.join(prefix, 'lib64', post)] if os.path.isdir(p)]
+ if post == 'dunecontrol':
+ logger.debug('Module path [pkg-config]: ' + ':'.join(path))
+ except KeyError:
+ pass
+ if post == 'dunecontrol':
+ logger.debug('Module path [guessed]: ' + ':'.join(path))
+ return path
+
+
+def select_modules(modules=None, module=None):
+ """choose one version of each module from a list of modules
+
+ Args:
+ modules (optional): List of (description, dir) pairs
+ If not given, find_modules(get_module_path()) is used
+ module (optional):
+
+ Returns:
+ pair of dictionaries mapping module name to unique description and directory respectively
+ """
+ if modules is None:
+ modules = find_modules(get_module_path())
+ desc = {}
+ dir = {}
+ for d, p in modules:
+ p = os.path.realpath(p)
+ n = d.name
+ if n in dir:
+ if p == dir[n]: continue
+ if is_installed(dir[n], n):
+ if is_installed(p, n):
+ foundVersions = " In " + p + " and in " + dir[n]
+ raise KeyError('Multiple installed versions for module \'' + n + '\' found.'+foundVersions)
+ else:
+ desc[n], dir[n] = d, p
+ else:
+ if not is_installed(p, n):
+ foundVersions = " In " + p + " and in " + dir[n]
+ raise KeyError('Multiple source versions for module \'' + n + '\' found.'+foundVersions)
+ else:
+ desc[n], dir[n] = d, p
+ return (desc, dir)
+
+
+def default_build_dir(srcdir, module=None, builddir=None):
+ if builddir is None:
+ builddir = os.environ.get('DUNE_BUILDDIR', 'build-cmake')
+
+ if os.path.isabs(builddir):
+ if module is None:
+ module = Description(os.path.join(srcdir, 'dune.module'))
+ if isinstance(module, Description):
+ module = module.name
+ return os.path.join(builddir, module)
+ else:
+ return os.path.join(srcdir, builddir)
+
+
+def configure_module(srcdir, builddir, prefix_dirs, cmake_args=None):
+ """configure a given module by running CMake
+
+ Args:
+ srcdir: source directory of module
+ builddir: build directory for module (may equal srcdir for in-source builds)
+ prefix_dirs: dictionary mapping dependent modules to their prefix
+ cmake_args (optional): list of additional CMake flags
+
+ Returns:
+ Output of CMake command
+ """
+ args = [ get_cmake_command() ]
+ args += cmakeArguments(cmake_args)
+ args += ['-D' + module + '_DIR=' + dir for module, dir in prefix_dirs.items()]
+ args.append(srcdir)
+ if not os.path.isdir(builddir):
+ os.makedirs(builddir)
+ logger.debug('Calling "' + ' '.join(args) + '"')
+ cmake = subprocess.Popen(args, cwd=builddir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = cmake.communicate()
+ logger.debug(buffer_to_str(stdout))
+ if cmake.returncode != 0:
+ raise RuntimeError(buffer_to_str(stderr))
+ return buffer_to_str(stdout)
+
+
+def get_default_build_args():
+ try:
+ return shlex.split(os.environ['DUNE_BUILD_FLAGS'])
+ except KeyError:
+ pass
+
+ return None
+
+def build_module(builddir, build_args=None):
+ if build_args is None:
+ build_args = get_default_build_args()
+
+ cmake_args = [get_cmake_command(), '--build', '.']
+ if build_args is not None:
+ cmake_args += ['--'] + build_args
+
+ cmake = subprocess.Popen(cmake_args, cwd=builddir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = cmake.communicate()
+ if cmake.returncode != 0:
+ raise RuntimeError(buffer_to_str(stderr))
+ return buffer_to_str(stdout)
+
+def get_dune_py_version():
+ # change this version on the following events:
+ # - a release (major version numbers)
+ # - any incompatible change to the dune-py module (revision number)
+ return Version("2.8.0")
+
+
+def make_dune_py_module(dune_py_dir=None, deps=None):
+ if dune_py_dir is None:
+ dune_py_dir = get_dune_py_dir()
+ os.makedirs(dune_py_dir, exist_ok=True)
+
+ descFile = os.path.join(dune_py_dir, 'dune.module')
+ if not os.path.isfile(descFile):
+ logger.info('Creating new dune-py module in ' + dune_py_dir)
+ # create python/dune/generated
+ generated_dir_rel = os.path.join('python','dune', 'generated')
+ generated_dir = os.path.join(dune_py_dir, generated_dir_rel)
+ if not os.path.isdir(generated_dir):
+ os.makedirs(generated_dir)
+
+ cmake_content = ['add_executable(generated_test EXCLUDE_FROM_ALL generated_test.cc)',
+ 'add_dune_mpi_flags(generated_test)',
+ 'target_compile_definitions(generated_test PRIVATE USING_DUNE_PYTHON)',
+ 'target_link_libraries(generated_test PUBLIC ${DUNE_LIBS})',
+ 'file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/__init__.py")',
+ '',
+ '# The builder will append rules for dynamically generated modules, here']
+ project.write_cmake_file(generated_dir, cmake_content)
+
+ with open(os.path.join(generated_dir, 'generated_test.cc'), 'w') as file:
+ file.write('#include <config.h>\n\n')
+ file.write('#define USING_DUNE_PYTHON 1\n\n')
+ file.write('\n#include "generated_module.hh"\n')
+
+ if deps is None:
+ modules, _ = select_modules()
+ deps = modules.keys()
+
+ description = Description(module='dune-py', version=get_dune_py_version(), maintainer='dune@lists.dune-project.org', depends=list(deps))
+ logger.debug('dune-py will depend on ' + ' '.join([m + (' ' + str(c) if c else '') for m, c in description.depends]))
+ project.make_project(dune_py_dir, description,
+ subdirs=[generated_dir_rel], is_dunepy=True)
+ else:
+ description = Description(descFile)
+ if description.name != 'dune-py':
+ raise RuntimeError('"' + dune_py_dir + '" already contains a different dune module.')
+ if description.version != get_dune_py_version():
+ logger.error('"' + dune_py_dir + '" contains version ' + str(description.version) + ' of the dune-py module, ' + str(get_dune_py_version()) + ' required.')
+ logger.error('If you upgraded dune-python, you can safely remove "' + dune_py_dir + '" and retry.')
+ raise RuntimeError('"' + dune_py_dir + '" contains a different version of the dune-py module.')
+ logger.debug('Using dune-py module in ' + dune_py_dir)
+
+def build_dune_py_module(dune_py_dir=None, cmake_args=None, build_args=None, builddir=None, deps=None, writetagfile=False):
+ if dune_py_dir is None:
+ dune_py_dir = get_dune_py_dir()
+ if cmake_args is None:
+ cmake_args = cmakeFlags()
+
+ modules, dirs = select_modules()
+ if deps is None:
+ deps = resolve_dependencies(modules)
+
+ desc = Description(module='dune-py', version=get_dune_py_version(), maintainer='dune@lists.dune-project.org', depends=list(deps))
+
+ with open(os.path.join(dune_py_dir, 'dune.module'), 'w') as file:
+ file.write(repr(desc))
+
+ # remove cache
+ try:
+ os.remove(os.path.join(dune_py_dir, 'CMakeCache.txt'))
+ except FileNotFoundError:
+ pass
+
+ prefix = {}
+ for name, dir in dirs.items():
+ if is_installed(dir, name):
+ found = False
+ # switch prefix to location of name-config.cmake
+ for l in ['lib','lib32','lib64']:
+ substr = l + '/cmake'
+ newpath = dir.replace('lib/dunecontrol', substr)
+ for _, _, files in os.walk(newpath):
+ # if name-config.cmake is found
+ # then this is the correct folder
+ if name+'-config.cmake' in files:
+ found = True
+ prefix[name] = newpath
+ break
+ if found: break
+ assert found
+ # store new module path
+ else:
+ prefix[name] = default_build_dir(dir, name, builddir)
+
+ logger.info('Configuring dune-py module in ' + dune_py_dir)
+ output = configure_module(dune_py_dir, dune_py_dir, {d: prefix[d] for d in deps}, cmake_args)
+ output += build_module(dune_py_dir, build_args)
+
+ if writetagfile:
+ # set a tag file to avoid automatic reconfiguration in builder
+ tagfile = os.path.join(dune_py_dir, ".noconfigure")
+ f = open(tagfile, 'w')
+ f.close()
+ return output
+
+def getCXXFlags():
+ '''Return the CXXFLAGS used during configuration of dune-py.
+ These are extracted from the CMakeCache.txt file.
+ '''
+ # imported locally to avoid a circular import between dune.common.module and dune.generator
+ from dune.generator.exceptions import ConfigurationError
+ cache = os.path.join(get_dune_py_dir(), "CMakeCache.txt")
+ matches = [match for match in [re.match('DEFAULT_CXXFLAGS:STRING=', line) for line in open(cache)] if match is not None]
+ if not matches:
+ return ''
+ if len(matches) > 1:
+ raise ConfigurationError("found multiple entries for CXXFLAGS in CMakeCache.txt")
+ return matches[0].string.partition('=')[2].rstrip()
--- /dev/null
+print("""WARNING:
+Importing deprecated `dune.common.pickle'; use
+ import pickle
+instead.
+""")
+
+from pickle import *
--- /dev/null
+import os
+
+def write_cmake_file(dir, content=None, subdirs=None, install=None):
+ if not os.path.isdir(dir):
+ raise ValueError('Directory \'' + dir + '\' does not exist.')
+
+ append = []
+ if subdirs is not None:
+ append += ['add_subdirectory("' + d + '")' for d in subdirs]
+
+ if install is not None and install[0]:
+ append += ['install(FILES'] + [' ' + f for f in install[0]] + [' DESTINATION "' + install[1] + '")']
+
+ with open(os.path.join(dir, 'CMakeLists.txt'), 'w') as file:
+ if content is not None:
+ file.write('\n'.join(content) + '\n')
+ if append:
+ file.write('\n'.join(append) + '\n')
+
+
+def write_config_h_cmake(dir, project, public_content=None):
+ if not os.path.isdir(dir):
+ raise ValueError('Directory \'' + dir + '\' does not exist.')
+ project_up = project.upper().replace('-', '_')
+ with open(os.path.join(dir, 'config.h.cmake'), 'w') as file:
+ file.write('/* begin ' + project + '\n put the definitions for config.h specific to\n your project here. Everything above will be\n overwritten\n*/\n\n')
+ file.write('/* begin private */\n')
+ file.write('\n/* Name of package */\n#define PACKAGE "${DUNE_MOD_NAME}"\n')
+ file.write('\n/* Define to the full name of this package */\n#define PACKAGE_NAME "${DUNE_MOD_NAME}"\n')
+ file.write('\n/* Define to the version of this package */\n#define PACKAGE_VERSION "${DUNE_MOD_VERSION}"\n')
+ file.write('\n/* Define to the full name and version of this package */\n#define PACKAGE_STRING "${DUNE_MOD_NAME} ${DUNE_MOD_VERSION}"\n')
+ file.write('\n/* Define to the address where bug reports for this package should be sent */\n#define PACKAGE_BUGREPORT "${DUNE_MAINTAINER}"\n')
+ file.write('\n/* Define to the one symbol short name of this package */\n#define PACKAGE_TARNAME "${DUNE_MOD_NAME}"\n')
+ file.write('\n/* Define to the home page for this package */\n#define PACKAGE_URL "${DUNE_MOD_URL}"\n')
+ file.write('\n/* end private */\n')
+ file.write('\n/* Define to the version of ' + project + ' */\n#define ' + project_up + '_VERSION "${' + project_up + '_VERSION}"\n')
+ file.write('\n/* Define to the major version of ' + project + ' */\n#define ' + project_up + '_VERSION_MAJOR "${' + project_up + '_VERSION_MAJOR}"\n')
+ file.write('\n/* Define to the minor version of ' + project + ' */\n#define ' + project_up + '_VERSION_MINOR "${' + project_up + '_VERSION_MINOR}"\n')
+ file.write('\n/* Define to the revision of ' + project + ' */\n#define ' + project_up + '_VERSION_REVISION "${' + project_up + '_VERSION_REVISION}"\n')
+ if public_content is not None:
+ file.write('\n'.join(public_content) + '\n\n')
+ file.write('\n/* end ' + project + '\n Everything below here will be overwritten.\n*/\n')
+
+
+def make_cmake_modules(dir, description, macros):
+ if not os.path.isdir(dir):
+ raise ValueError('Directory \'' + dir + '\' does not exist.')
+
+ cmake_dir_rel = os.path.join('cmake', 'modules')
+ cmake_dir = os.path.join(dir, cmake_dir_rel)
+ if not os.path.isdir(cmake_dir):
+ os.makedirs(cmake_dir)
+
+ macroFileName = ''.join([word[0].upper() + word[1:] for word in description.name.split('-')]) + 'Macros.cmake'
+ write_cmake_file(cmake_dir, install=([macroFileName], '${DUNE_INSTALL_MODULEDIR}'))
+
+ with open(os.path.join(cmake_dir, macroFileName), 'w') as file:
+ file.write('\n'.join(macros) + '\n')
+ return cmake_dir_rel
+
+
+def make_project(dir, description, subdirs=None, enable_all_packages=True, is_dunepy=False):
+ if not os.path.isdir(dir):
+ raise ValueError('Directory \'' + dir + '\' does not exist.')
+ with open(os.path.join(dir, 'dune.module'), 'w') as file:
+ file.write(repr(description))
+
+ cmake_content = ['cmake_minimum_required(VERSION 3.13)', 'project(' + description.name + ' C CXX)']
+ if is_dunepy:
+ cmake_content += ['', 'set(DUNE_ENABLE_PYTHONBINDINGS ON)']
+ cmake_content += ['',
+ 'if(NOT (dune-common_DIR OR dune-common_ROOT OR "${CMAKE_PREFIX_PATH}" MATCHES ".*dune-common.*"))',
+ ' string(REPLACE ${PROJECT_NAME} dune-common dune-common_DIR ${PROJECT_BINARY_DIR})',
+ 'endif()']
+ cmake_content += ['', 'find_package(dune-common REQUIRED)']
+ if subdirs is not None and 'cmake/modules' in subdirs:
+ cmake_content += ['list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/modules" "${dune-common_MODULE_PATH}")']
+ else:
+ cmake_content += ['list(APPEND CMAKE_MODULE_PATH ${dune-common_MODULE_PATH})']
+ cmake_content += ['', 'include(DuneMacros)', 'dune_project()']
+ if enable_all_packages:
+ cmake_content += ['dune_enable_all_packages()']
+ if subdirs is not None:
+ cmake_content += [''] + ['add_subdirectory("' + d + '")' for d in subdirs]
+ cmake_content += ['', 'finalize_dune_project(GENERATE_CONFIG_H_CMAKE)']
+ write_cmake_file(dir, cmake_content)
+
+ with open(os.path.join(dir, description.name + '.pc.in'), 'w') as file:
+ file.write('prefix=@prefix@\nexec_prefix=@exec_prefix@\n')
+ file.write('libdir=@libdir@\nincludedir=@includedir@\n')
+ file.write('CXX=@CXX@\nCC=@CC@\n')
+ file.write('DEPENDENCIES=@REQUIRES@\n\n')
+ file.write('Name: @PACKAGE_NAME@\n')
+ file.write('Version: @VERSION@\n')
+ file.write('Description: DUNE module "' + description.name + '"\n')
+ file.write('URL: http://dune-project.org\n')
+ file.write('Requires: ${DEPENDENCIES}\n')
+ file.write('Libs: -L${libdir}\n')
+ file.write('Cflags: -I${includedir}\n')
+
+ write_config_h_cmake(dir, description.name)
--- /dev/null
+import importlib
+import sys
+from inspect import signature
+
+def buffer_to_str(b):
+ return b.decode('utf-8')
+
+def isString(s):
+ return isinstance(s, str)
+
+def isInteger(i):
+ return isinstance(i, int)
+
+reload_module = importlib.reload
+
+def getNumberOfParameters(func):
+ return len( signature(func).parameters )
--- /dev/null
+import importlib
+import sys
+import pkgutil
+import logging
+import inspect
+
+import dune.common.module
+import dune.common
+import dune.generator
+
+logger = logging.getLogger(__name__)
+_create_map = dict()
+
+package = dune
+prefix = package.__name__ + "."
+subpackages = []
+logMsg = "Importing create registries from [ "
+
+# first import all 'dune' subpackages and collect the 'registry' dicts
+for importer, modname, ispkg in pkgutil.iter_modules(package.__path__, prefix):
+ if not ispkg:
+ continue
+
+ # can just use modname here if registry is part of __init__ file
+ try:
+ # Note: modname.__init__ is imported so be aware of
+ # possible side effects
+ module = importlib.import_module(modname)
+ except ImportError as e:
+ logger.debug('failed to import ' + modname + ': ' + str(e) + '.')
+ continue
+
+ # read the registry
+ try:
+ moduleRegistry = module.registry.items()
+ logMsg = logMsg + modname + " "
+ except AttributeError:
+ logger.debug('Module ' + modname + ' does not provide a registry.')
+ continue
+
+ # combine all registries
+ for obj, registry in moduleRegistry:
+ objmap = dict()
+ try:
+ objmap = _create_map[obj]
+ except KeyError:
+ _create_map[obj] = objmap
+ for key, value in registry.items():
+ if key in objmap:
+ raise RuntimeError('Key \'' + key + '\' registered twice for \'' + obj + '\'.')
+ else:
+ objmap[key.upper().lower()] = [value,modname]
+
+# the grid registries also provide views,
+# so we add them to the 'view' entry as well (if any grid registry was found)
+_create_map.setdefault("view",{}).update(_create_map.get("grid",{}))
+
+logMsg = logMsg + "]"
+logger.debug(logMsg)
+
+############################################################################
+def get(category=None,entry=None):
+ entry_ = _create_map.get(category,None)
+ if entry_ is None:
+ if category is not None:
+ print("category '",category,"' not valid,",end="")
+ print("available categories are:\n",
+ ','.join(k for k in sorted(_create_map)))
+ return
+ if entry is None:
+ print("available entries for this category are:")
+ entries = []
+ colLength = [0,0,0]
+ for k,e in entry_.items():
+ n = e[0].__module__.split(".")
+ if n[-1][0]=="_":
+ del n[-1]
+ entries += [ [k,e[0].__name__, '.'.join(m for m in n)] ]
+ colLength[0] = max(colLength[0],len(entries[-1][0]))
+ colLength[1] = max(colLength[1],len(entries[-1][1]))
+ colLength[2] = max(colLength[2],len(entries[-1][2]))
+ entries.sort()
+ print("entry".ljust(colLength[0]),
+ "function".ljust(colLength[1]),
+ "module".ljust(colLength[2]))
+ print("-"*sum(colLength))
+ for e in entries:
+ print(e[0].ljust(colLength[0]),
+ e[1].ljust(colLength[1]),
+ e[2].ljust(colLength[2]))
+ print("-"*sum(colLength))
+ else:
+ entry__ = entry_.get(entry,None)
+ if entry__ is None:
+ print("available entries are:",
+ ','.join(k for k in entry_))
+ return
+ return entry__[0]
+
+############################################################################
+## the second part is for the 'load' method
+
+# a helper class
+class Empty:
+ pass
+
+def signatureDict(func):
+ # get signature from func: we simply fill a dictionary with the names of
+ # all non-var arguments of the function as keys, each containing either
+ # Empty or the default argument provided by the function signature
+
+ ret = {}
+ sig = inspect.signature(func)
+ for p,v in sig.parameters.items():
+ # we only extract positional or keyword argument (i.e. not *args,**kwargs)
+ if v.kind == v.POSITIONAL_OR_KEYWORD:
+ name = v.name
+ default = v.default if not v.default is v.empty else Empty
+ ret.update({name:default})
+ return ret
+
+def _creatorCall(create, usedKeys, *args, **kwargs):
+ # get signature of create function to call
+ signature = signatureDict(create)
+ # check if any of the parameter names correspond to some creator -
+ # if a creator exists for that parameter name and the value passed in by
+ # the user for that parameter is a string, call the creator, otherwise
+ # use the object provided. If no creator exists, use the value
+ # provided by the user or the default value
+ for name in signature:
+ # special treatment of 'view'/'grid' parameter since a 'grid' is
+ # also a view
+ if name=='view' and not name in kwargs and 'grid' in kwargs:
+ kwargs.update({"view":kwargs["grid"]})
+ usedKeys.update(["grid"])
+ creator = globals().get(name)
+ if creator: # a creator for this parameter name exists
+ assert signature[name] == Empty, "argument in create method corresponding to creatables should not have default values"
+ argument = kwargs.get(name, Empty)
+ assert not argument == Empty, "required creatable argument '" + name + "' not provided"
+ if isinstance(argument,str):
+ # recursion
+ argument = argument.upper().lower()
+ paramCreator = creator.registry[argument][0]
+ signature[name] = _creatorCall(paramCreator, usedKeys, *args,**kwargs)
+ kwargs[name] = signature[name] # replace the string with the actual object
+ else:
+ signature[name] = argument # store the object provided
+ usedKeys.update([name])
+ else: # no creator available
+ argument = kwargs.get(name, Empty)
+ if argument == Empty:
+ assert not signature[name] == Empty, "no value for argument " + name + " provided"
+ kwargs[name] = argument
+ else:
+ signature[name] = argument
+ usedKeys.update([name])
+ return create(**signature)
+
+def creatorCall(self, key, *args, **kwargs):
+ key = key.upper().lower()
+ try:
+ create = self.registry[key][0]
+ except KeyError:
+ raise RuntimeError('No ' + self.obj + ' implementation: ' + key +\
+ '. Available: ' + ' '.join(r for r in self.registry) + '.' )
+ # the complex creation mechanism is only allowed with named arguments
+ # if positional arguments have been used, call the original function directly
+ # without further checking the parameters
+ if len(args)>0:
+ return create(*args, **kwargs)
+ else:
+ usedKeys = set()
+ # make a fix here for grids/views
+ if 'grid' in kwargs and not self.obj == 'view' and not 'view' in kwargs:
+ kwargs.update({'view':kwargs['grid']})
+ usedKeys.update(['grid'])
+ instance = _creatorCall(create,usedKeys,*args,**kwargs)
+ assert set(kwargs) == usedKeys, "some provided named parameters were not used"
+ return instance
+
+##########################################################################
+## for each 'registry' entry add a function to this module
+for obj, registry in _create_map.items():
+ # docs = "\n".join(k+" from "+v[1] for k,v in registry.items())
+ docs_format = "{:<25}" * (2)
+ docs = docs_format.format("key", "module") + "\n"
+ docs = docs + docs_format.format("----------", "----------") + "\n"
+ for k,v in registry.items():
+ docs = docs + docs_format.format(k,v[1]) + "\n"
+
+ attribs = {k: staticmethod(v[0]) for k, v in registry.items()}
+ attribs.update({"__call__": creatorCall, "registry": registry, "obj": obj})
+ C = type(str(obj), (object,), attribs)
+ c = C()
+ c.__doc__ = "Create a dune grid instance, available choices are:\n"+docs
+ setattr(sys.modules[__name__], obj, c)
+ logger.debug("added create."+obj+" with keys: \n"+\
+ "\n".join(" "+k+" from subpackage "+v[1] for k,v in registry.items()))
--- /dev/null
+import warnings
+
+def deprecated(msg,name=None):
+ '''This is a decorator which can be used to mark functions
+ as deprecated. It will result in a warning being emitted
+ when the function is used.'''
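+ # e.g. (hypothetical):
+ # @deprecated("use 'newFunction' instead")
+ # def oldFunction(): ...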
+ def deprecated_decorator(func):
+ def new_func(*args, **kwargs):
+ if name is None:
+ funcName = func.__name__
+ else:
+ funcName = name
+ if msg:
+ print("Call to deprecated function/property `{}`.".format(funcName),msg)
+ else:
+ print("Call to deprecated function/property `{}`.".format(funcName))
+ warnings.warn("Call to deprecated function {}.".format(funcName),
+ category=DeprecationWarning)
+ # note: DeprecationWarning is ignored by default (could use FutureWarning)
+ # also the warning will only be shown once
+ return func(*args, **kwargs)
+ new_func.__name__ = func.__name__
+ new_func.__doc__ = func.__doc__
+ new_func.__dict__.update(func.__dict__)
+ return new_func
+ return deprecated_decorator
--- /dev/null
+add_python_targets(generator
+ __init__
+ algorithm
+ importclass
+ builder
+ exceptions
+ generator
+ )
--- /dev/null
+import os, logging
+
+from dune.common.module import getCXXFlags
+from .exceptions import CompileError, ConfigurationError
+from .builder import Builder
+from . import builder as builderModule
+
+logger = logging.getLogger(__name__)
+
+env_force = os.environ.get('DUNE_FORCE_BUILD', 'FALSE').upper()
+env_save = os.environ.get('DUNE_SAVE_BUILD' , 'FALSE').upper()
+builder = Builder( env_force in ('1', 'TRUE'), env_save )
+
+def setNoDependencyCheck():
+ logger.debug("Switching off dependency check - modules will always be compiled")
+ builderModule.noDepCheck = True
+def setDependencyCheck():
+ logger.debug("Switching on dependency check")
+ builderModule.noDepCheck = False
+def setFlags(flags="-g",noChecks=None):
+ logger.debug("Using compile flags '"+flags+"'")
+ builderModule.cxxFlags = flags
+ if noChecks is True:
+ setNoDependencyCheck()
+ elif noChecks is False:
+ setDependencyCheck()
+def addToFlags(pre="",post="",noChecks=None):
+ setFlags(pre+" "+getCXXFlags()+" "+post,noChecks)
+
+def unsetFlags(noChecks=None):
+ logger.debug("Using compile flags from configuration of dune-py")
+ builderModule.cxxFlags = None
+ if noChecks is True:
+ setNoDependencyCheck()
+ elif noChecks is False:
+ setDependencyCheck()
+def reset():
+ unsetFlags()
+ setDependencyCheck()
+
+def path(f):
+ return os.path.dirname(os.path.realpath(f))+"/"
+
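+# Helper classes that assemble pybind11 registration code as C++ source strings.
+# Hypothetical sketch: Constructor(["int a"], ["return new Foo( a );"]).register("cls")
+# yields a 'cls.def( pybind11::init( ... ) );' snippet for the class bound as 'cls'.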
+class Constructor(object):
+ def __init__(self, args, body=None, extra=None):
+ self.args = args
+ self.body = body
+ self.extra = [] if extra is None else extra
+
+ def register(self, cls="cls"):
+ if self.body is None:
+ return cls + ".def( pybind11::init( " + self.args + " )" + "".join(", " + e for e in self.extra) + " );\n"
+ if self.args:
+ source = cls + ".def( pybind11::init( [] ( " + ", ".join(self.args) + " ) {"
+ else:
+ source = cls + ".def( pybind11::init( [] () {"
+ source += "\n ".join(self.body)
+ source += "\n } )" + "".join(", " + e for e in self.extra) + " );\n"
+ return source
+
+ def __str__(self):
+ return self.register()
+
+
+class Method(object):
+ def __init__(self, name, args, body=None, extra=None):
+ self.name = name
+ self.args = args
+ self.body = body
+ if extra is None:
+ self.extra = []
+ else:
+ self.extra = extra
+
+ def register(self, cls="cls"):
+ if self.body is None:
+ return cls + ".def( \"" + self.name + "\", " + self.args + "".join(", " + e for e in self.extra) + " );\n"
+ if self.args:
+ source = cls + ".def(\"" + self.name + "\", [] ( " + ", ".join(self.args) + " ) {"
+ else:
+ source = cls + ".def( \"" + self.name + "\", [] () {"
+ source += "\n ".join(self.body)
+ source += "\n } " + "".join(", " + e for e in self.extra) + " );\n"
+ return source
+
+ def __str__(self):
+ return self.register()
--- /dev/null
+import numpy
+
+from dune.common.hashit import hashIt
+from . import builder
+from dune.common.utility import isString
+
+def cppType(arg):
+ try:
+ t, i = arg._typeName + " &", arg._includes
+ except AttributeError:
+ if isinstance(arg, bool):
+ t, i = "bool", []
+ elif isinstance(arg, int) or isinstance(arg,numpy.intc):
+ t, i = "int", []
+ elif isinstance(arg,numpy.int_):
+ t, i = "long", []
+ elif isinstance(arg,numpy.intp):
+ t, i = "std::size_t", []
+ elif isinstance(arg, float) or isinstance(arg,numpy.float_):
+ t, i = "double", []
+ elif isinstance(arg, numpy.ndarray):
+ dtype = None
+ if arg.dtype.type == numpy.intc:
+ dtype="int"
+ elif arg.dtype.type == numpy.int_:
+ dtype="long"
+ elif arg.dtype.type == numpy.intp:
+ dtype="std::size_t"
+ elif arg.dtype.type == numpy.float_:
+ dtype="double"
+ if dtype is None:
+ t, i = "pybind11::array", ["dune/python/pybind11/numpy.h"]
+ else:
+ t, i = "pybind11::array_t<"+dtype+">", ["dune/python/pybind11/numpy.h"]
+ elif isinstance(arg, str):
+ t, i = "std::string", ["string"]
+ elif callable(arg):
+ t, i = "pybind11::function", ["dune/python/pybind11/pybind11.h"]
+ elif isinstance(arg,tuple) or isinstance(arg,list):
+ t, i = cppType(arg[0])
+ t = "std::vector<"+t+">"
+ i += ["vector"]
+ else:
+ raise Exception("Cannot deduce C++ type for the following argument: " + repr(arg))
+ return t,i
+
+def load(functionName, includes, *args):
+ '''Just in time compile an algorithm.
+
+ Generates binding for a single (template) function. The name of the
+ function and the C++ types of the arguments passed to this function are
+ used to generate a static type used in the bindings. The file(s)
+ containing the code for the function are passed in either as single
+ string or as a list of strings. Note that these files will be copied
+ into the generated module. The file name can include a path name. So in
+ the simplest case `includes="header.hh"` will include the file from the
+ current working directory. To include a file from the directory
+ containing the calling script use
+ `includes=dune.generator.path(__file__)+"header.hh"`.
+
+ Args:
+ functionName: name of the C++ function to provide bindings for
+ includes: single or list of files to add to the generated module
+ *args: list of arguments that will be passed to the generated module
+
+ Returns:
+ Callable object
+ '''
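+ # hypothetical usage, assuming "header.hh" defines a free function
+ # 'double add( double, double )':
+ # add = load("add", "header.hh", 2.0, 3.0)
+ # result = add(2.0, 3.0)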
+
+ # header guard is added further down
+ source = '#include <config.h>\n\n'
+ source += '#define USING_DUNE_PYTHON 1\n\n'
+ if isString(includes):
+ with open(includes, "r") as include:
+ source += include.read()
+ source += "\n"
+ includes = []
+ elif hasattr(includes,"readable"): # for IOString
+ with includes as include:
+ source += include.read()
+ source += "\n"
+ includes = []
+ elif isinstance(includes, list):
+ for includefile in includes:
+ with open(includefile, "r") as include:
+ source += include.read()
+ source += "\n"
+ includes = []
+
+ argTypes = []
+ for arg in args:
+ t,i = cppType(arg)
+ argTypes.append(t)
+ includes += i
+
+ signature = functionName + "( " + ", ".join(argTypes) + " )"
+ moduleName = "algorithm_" + hashIt(signature) + "_" + hashIt(source)
+
+ # add unique header guard with moduleName
+ source = '#ifndef Guard_'+moduleName+'\n' + \
+ '#define Guard_'+moduleName+'\n\n' + \
+ source
+
+ includes = sorted(set(includes))
+ source += "".join(["#include <" + i + ">\n" for i in includes])
+ source += "\n"
+ source += '#include <dune/python/common/typeregistry.hh>\n'
+ source += '#include <dune/python/pybind11/pybind11.h>\n'
+ source += '\n'
+
+ source += "PYBIND11_MODULE( " + moduleName + ", module )\n"
+ source += "{\n"
+
+ source += " module.def( \"run\", [] ( " + ", ".join([argTypes[i] + " arg" + str(i) for i in range(len(argTypes))]) + " ) {\n"
+ source += " return " + functionName + "( " + ", ".join(["arg" + str(i) for i in range(len(argTypes))]) + " );\n"
+ source += " } );\n"
+
+ source += "}\n"
+ source += "#endif\n"
+
+ return builder.load(moduleName, source, signature).run
+
+
+def run(functionName, includes, *args):
+ '''Just in time compile and run an algorithm.
+
+ For details see the help for `dune.algorithm.load`.
+
+ Args:
+ functionName: name of the C++ function to provide bindings for
+ includes: single or list of files to add to the generated module
+ *args: list of arguments that will be passed to the generated module
+
+ Returns:
+ return value of `functionName(*args)`
+ '''
+ return load(functionName, includes, *args)(*args)
--- /dev/null
+import importlib
+import logging
+import shlex
+import subprocess
+import os
+import sys
+
+from dune.common import comm
+from dune.common.locking import Lock, LOCK_EX,LOCK_SH
+from dune.common.utility import buffer_to_str, isString, reload_module
+from dune.generator.exceptions import CompileError, ConfigurationError
+import dune.common.module
+
+logger = logging.getLogger(__name__)
+cxxFlags = None
+noDepCheck = False
+
+class Builder:
+ def __init__(self, force=False, saveOutput=False):
+ self.force = force
+ self.dune_py_dir = dune.common.module.get_dune_py_dir()
+ os.makedirs(self.dune_py_dir, exist_ok=True)
+
+ if comm.rank == 0:
+ # lock the whole dune-py module exclusively to possibly
+ # generate and then build the module
+ with Lock(os.path.join(self.dune_py_dir, 'lock-module.lock'), flags=LOCK_EX):
+ dune.common.module.make_dune_py_module(self.dune_py_dir)
+ tagfile = os.path.join(self.dune_py_dir, ".noconfigure")
+ if not os.path.isfile(tagfile):
+ dune.common.module.build_dune_py_module(self.dune_py_dir)
+ # create .noconfigure to disable configuration for future calls
+ open(tagfile, 'a').close()
+ else:
+ logger.debug('Using pre configured dune-py module')
+ comm.barrier()
+
+ self.build_args = dune.common.module.get_default_build_args()
+ self.generated_dir = os.path.join(self.dune_py_dir, 'python', 'dune', 'generated')
+ try:
+ dune.__path__._path.insert(0,os.path.join(self.dune_py_dir, 'python', 'dune'))
+ except:
+ dune.__path__.insert(0,os.path.join(self.dune_py_dir, 'python', 'dune'))
+
+ # saveOutput may be a bool or a string ("write", "append", "console"/"terminal")
+ saveOutputMode = saveOutput.lower() if isString(saveOutput) else ""
+ if saveOutput is True or saveOutputMode == "write":
+ self.savedOutput = [open("generatorCompiler.out","w+"), open("generatorCompiler.err","w+")]
+ elif saveOutputMode == "append":
+ self.savedOutput = [open("generatorCompiler.out","a+"), open("generatorCompiler.err","a+")]
+ elif saveOutputMode in ("console", "terminal"):
+ self.savedOutput = [sys.stdout, sys.stderr]
+ else:
+ self.savedOutput = None
+
+ def compile(self, target='all'):
+ cmake_command = dune.common.module.get_cmake_command()
+ cmake_args = [cmake_command, "--build", self.dune_py_dir, "--target", target]
+ make_args = []
+ if self.build_args is not None:
+ make_args += self.build_args
+ if cxxFlags is not None:
+ make_args += ['CXXFLAGS='+cxxFlags]
+ if noDepCheck:
+ make_args += ["-B"]
+
+ if cmake_args != []:
+ cmake_args += ["--"] + make_args
+ cmake = subprocess.Popen(cmake_args, cwd=self.generated_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = cmake.communicate()
+ logger.debug("Compiler output: "+buffer_to_str(stdout))
+ if cmake.returncode > 0:
+ raise CompileError(buffer_to_str(stderr))
+ if self.savedOutput is not None:
+ out = buffer_to_str(stdout)
+ nlines = out.count('\n')
+ if nlines > 1:
+ self.savedOutput[0].write("###############################\n")
+ self.savedOutput[0].write("###" + " ".join(cmake_args)+"\n")
+ if nlines > 0:
+ self.savedOutput[0].write(out)
+ if nlines > 1:
+ self.savedOutput[0].write("\n###############################\n")
+
+ err = buffer_to_str(stderr)
+ nlines = err.count('\n')
+ if nlines > 1:
+ self.savedOutput[1].write("###############################\n")
+ self.savedOutput[1].write("###" + " ".join(cmake_args)+"\n")
+ if nlines > 0:
+ self.savedOutput[1].write(err)
+ if nlines > 1:
+ self.savedOutput[1].write("\n###############################\n")
+
+ def load(self, moduleName, source, pythonName):
+
+ ## TODO: replace the rank-0 check with something better
+ ## and remove the barrier further down
+ if comm.rank == 0:
+ module = sys.modules.get("dune.generated." + moduleName)
+ if module is None:
+ # make sure nothing (compilation, generating and building) is taking place
+ with Lock(os.path.join(self.dune_py_dir, 'lock-module.lock'), flags=LOCK_EX):
+ # module must be generated so lock the source file
+ with Lock(os.path.join(self.dune_py_dir, 'lock-'+moduleName+'.lock'), flags=LOCK_EX):
+ sourceFileName = os.path.join(self.generated_dir, moduleName + ".cc")
+ line = "dune_add_pybind11_module(NAME " + moduleName + " EXCLUDE_FROM_ALL)"
+ # first check if this line is already present in the CMakeLists file
+ # (possible if a previous script was stopped by the user before the module was compiled)
+ with open(os.path.join(self.generated_dir, "CMakeLists.txt"), 'r') as out:
+ found = line in out.read()
+ if not os.path.isfile(sourceFileName) or not found:
+ logger.info("Compiling " + pythonName)
+ code = str(source)
+ with open(os.path.join(sourceFileName), 'w') as out:
+ out.write(code)
+ assert os.path.isfile(sourceFileName), "Error in writing module .cc file"
+ if not found:
+ origPos = -1
+ with open(os.path.join(self.generated_dir, "CMakeLists.txt"), 'a') as out:
+ # store original file size
+ origPos = out.tell()
+ out.write(line+"\n")
+ # update build system
+ logger.debug("Rebuilding module")
+ try:
+ self.compile()
+ except: # all exceptions will cause a problem here
+ os.remove(os.path.join(sourceFileName))
+ # remove line from CMakeLists to avoid problems
+ with open(os.path.join(self.generated_dir, "CMakeLists.txt"), 'a') as out:
+ out.truncate(origPos)
+ raise
+ elif isString(source) and not source == open(os.path.join(sourceFileName), 'r').read():
+ logger.info("Compiling " + pythonName + " (updated)")
+ code = str(source)
+ with open(os.path.join(sourceFileName), 'w') as out:
+ out.write(code)
+ else:
+ logger.debug("Loading " + pythonName)
+ line = "dune_add_pybind11_module(NAME " + moduleName + " EXCLUDE_FROM_ALL)"
+ # the CMakeLists file should already include this line
+ with open(os.path.join(self.generated_dir, "CMakeLists.txt"), 'r') as out:
+ found = line in out.read()
+ assert found, "CMakeLists file does not contain an entry to build "+moduleName
+ # end of exclusive dune-py lock
+
+ # for compilation a shared lock is enough
+ with Lock(os.path.join(self.dune_py_dir, 'lock-module.lock'), flags=LOCK_SH):
+ # lock generated module
+ with Lock(os.path.join(self.dune_py_dir, 'lock-'+moduleName+'.lock'), flags=LOCK_EX):
+ logger.debug("Now compiling "+moduleName)
+ self.compile(moduleName)
+ ## end of "if module is None" block
+
+ ## TODO remove barrier here
+ comm.barrier()
+ module = importlib.import_module("dune.generated." + moduleName)
+
+ if self.force:
+ logger.info("Reloading " + pythonName)
+ module = reload_module(module)
+
+ return module
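+
+
+# Minimal usage sketch (illustrative only): modules are normally generated and
+# loaded through dune.generator rather than by calling Builder directly.
+# 'moduleName', 'source' and 'pythonName' would be provided by a generator.
+#
+#   builder = Builder()
+#   module = builder.load(moduleName, source, pythonName)
+#   # 'module' is the compiled extension module dune.generated.<moduleName>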
--- /dev/null
+class CompileError(Exception):
+ '''raise this when there's a problem compiling an extension module'''
+ def __init__(self, error):
+ Exception.__init__(self,error)
+class ConfigurationError(Exception):
+ '''raise this when there's a problem with the configuration of dune-py'''
+ def __init__(self, error):
+ Exception.__init__(self,error)
--- /dev/null
+""" Generator module:
+
+ The module provides the main class for on-the-fly generation of pybind11
+ Python wrappers for implementations of a given interface. The necessary
+ details for each implementation (the C++ typedef and the includes) are
+ provided by Python dictionaries stored in files.
+"""
+
+import logging
+
+from . import builder
+from dune.common.hashit import hashIt
+
+logger = logging.getLogger(__name__)
+
+class SimpleGenerator(object):
+ def __init__(self, typeName, namespace, pythonname=None, filename=None):
+ if not (isinstance(typeName,list) or isinstance(typeName,tuple)):
+ self.single = True
+ typeName = [typeName]
+ else:
+ self.single = False
+ self.typeName = typeName
+ if namespace:
+ self.namespace = namespace+"::"
+ else:
+ self.namespace = ""
+ if pythonname is None:
+ self.pythonName = typeName
+ else:
+ self.pythonName = pythonname
+ self.fileName = filename
+
+ def pre(self, includes, duneType, moduleName, defines=None, preamble=None):
+ if defines is None: defines = []
+ source = '#ifndef Guard_' + moduleName + '\n'
+ source += '#define Guard_' + moduleName + '\n\n'
+ source += '#include <config.h>\n\n'
+ source += '#define USING_DUNE_PYTHON 1\n\n'
+ source += ''.join(["#define " + d + "\n" for d in defines])
+ source += ''.join(["#include <" + i + ">\n" for i in includes])
+ source += '\n'
+ source += '#include <dune/python/common/typeregistry.hh>\n'
+ source += '#include <dune/python/pybind11/pybind11.h>\n'
+ source += '#include <dune/python/pybind11/stl.h>\n'
+ source += '\n'
+
+ if self.fileName is not None:
+ with open(self.fileName, "r") as include:
+ source += include.read()
+ source += "\n"
+ if preamble is not None:
+ source += preamble
+ source += "\n"
+
+ if self.namespace == "":
+ source += "void register" + self.typeName[0] + "( ... ) {}\n"
+ source += "PYBIND11_MODULE( " + moduleName + ", module )\n"
+ source += "{\n"
+ return source
+
+ def main(self, nr, includes, duneType, *args,
+ options=None, bufferProtocol=False, dynamicAttr=False,
+ holder="default",
+ baseClasses=None ):
+ if options is None: options=[]
+ if baseClasses is None: baseClasses=[]
+ source = " using pybind11::operator\"\"_a;\n"
+ if not bufferProtocol:
+ clsParams = []
+ else:
+ clsParams = ['pybind11::buffer_protocol()']
+ if dynamicAttr:
+ clsParams += ['pybind11::dynamic_attr()']
+
+ if nr == 0:
+ source += ' pybind11::module cls0 = module;\n'
+
+ source += ' {\n'
+ source += " using DuneType = " + duneType + ";\n"
+
+ for i, bc in enumerate(baseClasses):
+ if not holder == "default":
+ baseHolder = "," + holder + "<" + bc + ">"
+ else:
+ baseHolder = ''
+ source += 'Dune::Python::insertClass' +\
+ '< ' + bc + baseHolder + '>' +\
+ '( module, "cls' + str(i) + '"' +\
+ ', Dune::Python::GenerateTypeName("' + bc + '")' +\
+ ', Dune::Python::IncludeFiles{}' +\
+ ");\n"
+ options.append(bc)
+
+ if not holder == "default":
+ options += [holder + "<" + duneType + ">"]
+
+ source += ' auto cls = Dune::Python::insertClass' +\
+ '< DuneType' +\
+ ', '.join(('',)+tuple(options)) + ' >' +\
+ '( cls0, "' + self.pythonName[nr] + '"' +\
+ ','.join(('',)+tuple(clsParams)) +\
+ ', Dune::Python::GenerateTypeName("' + duneType + '")' +\
+ ', Dune::Python::IncludeFiles{' + ','.join(['"' + i + '"' for i in includes]) + '}' +\
+ ").first;\n"
+ source += " " + self.namespace + "register" + self.typeName[nr] + "( cls0, cls );\n"
+
+ for arg in args:
+ if arg:
+ source += "".join(" " + s + "\n" for s in str(arg).splitlines())
+ source += ' }\n'
+ return source
+
+ def post(self, moduleName, source, postscript):
+ if postscript:
+ source += postscript
+ source += "}\n"
+ source += '#endif'
+ module = builder.load(moduleName, source, self.typeName[0])
+ return module
+
+ def load(self, includes, typeName, moduleName, *args,
+ defines=None, preamble=None, postscript=None,
+ options=None, bufferProtocol=False, dynamicAttr=False,
+ baseClasses=None ):
+ if defines is None: defines = []
+ if options is None: options = []
+ if baseClasses is None: baseClasses = []
+ if self.single:
+ typeName = (typeName,)
+ options = (options,)
+ bufferProtocol = (bufferProtocol,)
+ dynamicAttr = (dynamicAttr,)
+ args = (args,)
+ baseClasses = (baseClasses,)
+ else:
+ if len(args) == 0:
+ args = ((),)*len(typeName)
+ else:
+ args = args[0]
+ if len(options) == 0:
+ options = ((),)*len(typeName)
+ if len(baseClasses) == 0:
+ baseClasses = ((),)*len(typeName)
+ if not bufferProtocol:
+ bufferProtocol = (False,)*len(typeName)
+ if not dynamicAttr:
+ dynamicAttr = (False,)*len(typeName)
+ if isinstance(includes[0],tuple) or isinstance(includes[0],list):
+ allIncludes = [item for sublist in includes for item in sublist]
+ includes = includes[0]
+ else:
+ allIncludes = includes
+ allIncludes = sorted(set(allIncludes))
+ includes = sorted(set(includes))
+ source = self.pre(allIncludes, typeName[0], moduleName, defines, preamble)
+ for nr, (tn, a, o, b, d, bc) in enumerate( zip(typeName, args, options, bufferProtocol, dynamicAttr, baseClasses) ):
+ source += self.main(nr, includes, tn, *a, options=o,
+ bufferProtocol=b, dynamicAttr=d,
+ baseClasses=bc)
+ return self.post(moduleName, source, postscript)
+
+def simpleGenerator(inc, baseType, namespace, pythonname=None, filename=None):
+ generator = SimpleGenerator(baseType, namespace, pythonname, filename)
+ def load(includes, typeName, *args):
+ includes = includes + inc
+ moduleName = namespace + "_" + baseType + "_" + hashIt(typeName)
+ return generator.load(includes, typeName, moduleName, *args)
+ return load
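+
+
+# Illustrative sketch of a typical SimpleGenerator use (all names hypothetical):
+# a header 'registerfoo.hh' is assumed to provide a function
+# Dune::Python::registerFoo( module, cls ) that adds the bindings for the C++ type.
+#
+#   from dune.common.hashit import hashIt
+#   generator = SimpleGenerator("Foo", "Dune::Python")
+#   typeName = "Dune::Foo< double >"
+#   module = generator.load(["registerfoo.hh"], typeName, "foo_" + hashIt(typeName))
+#   foo = module.Foo(...)   # class registered under the name "Foo"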
--- /dev/null
+import os
+
+from dune.common.hashit import hashIt
+from . import builder
+from dune.common.utility import isString
+from dune.generator.algorithm import cppType
+
+def load(className, includeFiles, *args,
+ options=None, bufferProtocol=False, dynamicAttr=False,
+ holder="default",
+ baseClasses=None ):
+ if options is None: options=[]
+ if baseClasses is None: baseClasses=[]
+ if not bufferProtocol:
+ clsParams = []
+ else:
+ clsParams = ['pybind11::buffer_protocol()']
+ if dynamicAttr:
+ clsParams += ['pybind11::dynamic_attr()']
+
+ source = '#include <config.h>\n\n'
+ source += '#define USING_DUNE_PYTHON 1\n\n'
+ includes = []
+ if isString(includeFiles) or hasattr(includeFiles,"readable"):
+ includeFiles = [includeFiles]
+ for includefile in includeFiles:
+ if isString(includefile):
+ if not os.path.dirname(includefile):
+ with open(includefile, "r") as include:
+ source += include.read()
+ source += "\n"
+ else:
+ source += "#include <"+includefile+">\n"
+ includes += [includefile]
+ elif hasattr(includefile,"readable"): # for IOString
+ with includefile as include:
+ source += include.read()
+ source += "\n"
+
+ argTypes = []
+ for arg in args:
+ t,i = cppType(arg)
+ argTypes.append(t)
+ includes += i
+
+ signature = className + "( " + ", ".join(argTypes) + " )"
+ moduleName = "class_" + hashIt(signature) + "_" + hashIt(source)
+
+ includes = sorted(set(includes))
+
+ includes += ["python/dune/generated/"+moduleName+".cc"]
+
+ source += "".join(["#include <" + i + ">\n" for i in includes])
+ source += "\n"
+ source += '#include <dune/python/common/typeregistry.hh>\n'
+ source += '#include <dune/python/pybind11/pybind11.h>\n'
+ source += '\n'
+
+ source += "PYBIND11_MODULE( " + moduleName + ", module )\n"
+ source += "{\n"
+
+
+ for i, bc in enumerate(baseClasses):
+ if not holder == "default":
+ baseHolder = ", " + holder + "<" + bc + ">"
+ else:
+ baseHolder = ''
+ source += 'Dune::Python::insertClass' +\
+ '< ' + bc + baseHolder + '>' +\
+ '( module, "cls' + str(i) + '"' +\
+ ', Dune::Python::GenerateTypeName("' + bc + '")' +\
+ ', Dune::Python::IncludeFiles{}' +\
+ ");\n"
+ options.append(bc)
+
+ if not holder == "default":
+ options += [holder + "<" + className + ">"]
+
+ source += "auto cls = Dune::Python::insertClass< "+className+\
+ ', '.join(('',)+tuple(options)) + ' >('+\
+ "module, \"cls\","+','.join(('',)+tuple(clsParams))+\
+ "Dune::Python::GenerateTypeName(\""+className+"\"),"+\
+ "Dune::Python::IncludeFiles{"+",".join(["\""+f+"\"" for f in includes])+"}).first;\n"
+
+ ctorArgs = ", ".join([argTypes[i] + " arg" + str(i) for i in range(len(argTypes))])
+ source += "cls.def( pybind11::init( [] ( "+ctorArgs+" ) {\n"
+ source += "return new "+className+"( "+",".join(["arg"+str(i) for i in range(len(argTypes))]) +"); \n"
+ source += "})"+" ".join([", pybind11::keep_alive< 1, {} >()".format(i+2) for i in range(len(argTypes))]) + "\n"
+ source += ");\n"
+ source += "}"
+
+ source = "#ifndef def_"+moduleName+\
+ "\n#define def_"+moduleName+"\n"+\
+ source+\
+ "\n#endif\n"
+
+ return builder.load(moduleName, source, signature).cls(*args)
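+
+
+# Illustrative usage sketch (hypothetical file and class): constructs an instance
+# of a C++ class by just-in-time compiling a small pybind11 binding module.
+#
+#   # assumes a local file 'mycls.hh' (its contents are pasted into the generated
+#   # module) defining a class MyClass with a constructor MyClass(double)
+#   obj = load("MyClass", "mycls.hh", 0.5)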
--- /dev/null
+#!/usr/bin/env python3
+
+from setuptools import find_packages
+import sys, os, io, getopt, re, ast
+import shlex
+import importlib, subprocess
+import email.utils
+import pkg_resources
+from datetime import date
+
+class Version:
+ def __init__(self, s):
+ if s is None:
+ self.major = 0
+ self.minor = 0
+ self.revision = 0
+ elif isinstance(s, Version):
+ self.major = s.major
+ self.minor = s.minor
+ self.revision = s.revision
+ else:
+ match = re.match('(?P<major>[0-9]+)[.](?P<minor>[0-9]+)([.](?P<revision>[0-9]+))?', s)
+ if not match:
+ raise ValueError('Invalid version: \'' + s + '\'.')
+ self.major = int(match.group('major'))
+ self.minor = int(match.group('minor'))
+ self.revision = int(match.group('revision')) if match.group( 'revision' ) else 0
+
+ def __str__(self):
+ return str(self.major) + '.' + str(self.minor) + '.' + str(self.revision)
+
+ def as_tuple(self):
+ return (self.major, self.minor, self.revision)
+
+ def __eq__(self, other):
+ return self.as_tuple() == other.as_tuple()
+
+ def __ne__(self, other):
+ return self.as_tuple() != other.as_tuple()
+
+ def __lt__(self, other):
+ return self.as_tuple() < other.as_tuple()
+
+ def __le__(self, other):
+ return self.as_tuple() <= other.as_tuple()
+
+ def __gt__(self, other):
+ return self.as_tuple() > other.as_tuple()
+
+ def __ge__(self, other):
+ return self.as_tuple() >= other.as_tuple()
+
+
+class VersionRequirement:
+ def __init__(self, s):
+ if s:
+ match = re.match('(?P<operator>(>|>=|==|<=|<))\s*(?P<version>[0-9.]+)', s)
+ if not match:
+ raise ValueError('Invalid version qualifier: \'' + s + '\'.')
+ self.version = Version(match.group('version'))
+ operator = match.group('operator')
+
+ if operator == '>':
+ self.operator = Version.__gt__
+ elif operator == '>=':
+ self.operator = Version.__ge__
+ elif operator == '==':
+ self.operator = Version.__eq__
+ elif operator == '<=':
+ self.operator = Version.__le__
+ elif operator == '<':
+ self.operator = Version.__lt__
+ else:
+ raise ValueError('Invalid comparison operator: \'' + operator + '\'.')
+ else:
+ self.operator = lambda a, b : True
+ self.version = None
+
+ def __bool__(self):
+ return self.version is not None
+
+ __nonzero__ = __bool__
+
+ def __call__(self, version):
+ return self.operator(version, self.version)
+
+ def __repr__(self):
+ return str(self)
+
+ def __str__(self):
+ if self.operator == Version.__gt__:
+ return '(> ' + str(self.version) + ')'
+ elif self.operator == Version.__ge__:
+ return '(>= ' + str(self.version) + ')'
+ elif self.operator == Version.__eq__:
+ return '(== ' + str(self.version) + ')'
+ elif self.operator == Version.__le__:
+ return '(<= ' + str(self.version) + ')'
+ elif self.operator == Version.__lt__:
+ return '(< ' + str(self.version) + ')'
+ else:
+ return ''
+
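+# Small illustration of the two helper classes above:
+#
+#   req = VersionRequirement('>= 2.8')
+#   req(Version('2.9'))     # True:  2.9.0 >= 2.8.0
+#   req(Version('2.7.1'))   # False: 2.7.1 <  2.8.0
+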
+class Description:
+ def __init__(self, fileName=None, **kwargs):
+ data = kwargs.copy()
+
+ valid_entries = ['Module','Maintainer','Version',
+ 'Depends','Suggests','Python-Requires',
+ 'Whitespace-Hook',
+ 'Author','Description','URL']
+
+ if fileName is not None:
+ with io.open(fileName, 'r', encoding='utf-8') as file:
+ import re
+ for line in file:
+ line = line.strip()
+ if not line or line[ 0 ] == '#':
+ continue
+ m = re.search(r'^([a-zA-Z0-9-_]+):(.*)', line)
+ if m:
+ key = m.group(1)
+ val = m.group(2)
+ if not key in valid_entries:
+ raise ValueError('Invalid dune.module entry %s (%s).' % (key,fileName))
+ data[key.lower()] = val.strip()
+ try:
+ self.name = data['module']
+ except KeyError:
+ raise KeyError('Module description does not contain module name.')
+
+ self.versionstring = data.get('version')
+ self.version = Version(data.get('version'))
+
+ try:
+ self.maintainer = email.utils.parseaddr(data['maintainer'])
+ if not self.maintainer[1]:
+ raise ValueError('Module description contains invalid maintainer e-mail address.')
+ except KeyError:
+ self.maintainer = None
+
+ try:
+ self.author = data['author']
+ except KeyError:
+ self.author = None
+
+ try:
+ self.description = data['description']
+ except KeyError:
+ self.description = ''
+
+ try:
+ self.url = data['url']
+ except KeyError:
+ self.url = None
+
+ try:
+ wshook = data['whitespace-hook'].lower()
+ if wshook == 'yes':
+ self.whitespace_hook = True
+ elif wshook == 'no':
+ self.whitespace_hook = False
+ else:
+ raise ValueError('Invalid value for whitespace-hook: ' + wshook + '.')
+ except KeyError:
+ self.whitespace_hook = None
+
+ def parse_deps(s):
+ deps = []
+ if isinstance(s, list):
+ for m in s:
+ if isinstance(m, Description):
+ deps.append((m.name, VersionRequirement(None)))
+ else:
+ deps.append((m, VersionRequirement(None)))
+ else:
+ while s:
+ match = re.match('(?P<module>[a-zA-Z0-9_\-]+)(\s*\((?P<version>[^)]*)\))?((?P<pyversion>[^\s]*))?', s)
+ if not match:
+ raise ValueError('Invalid dependency list.')
+ deps.append((match.group('module'), VersionRequirement(match.group('version') or match.group('pyversion'))))
+ s = s[match.end():].strip()
+ return deps
+
+ self.depends = parse_deps(data.get('depends'))
+ self.suggests = parse_deps(data.get('suggests'))
+ self.python_requires = parse_deps(data.get('python-requires'))
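+
+ # Example of the dependency syntax parsed above (hypothetical module names;
+ # the version qualifier in parentheses is optional):
+ # Depends: dune-common (>= 2.7) dune-geometry
+ # Suggests: dune-istl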
+
+ def __repr__(self):
+ s = 'Module: ' + self.name + '\n'
+ s += 'Version: ' + str(self.version) + '\n'
+ if self.maintainer is not None:
+ s += 'Maintainer: ' + email.utils.formataddr(self.maintainer) + '\n'
+ if self.author is not None:
+ s += 'Author: ' + self.author + '\n'
+ if self.description != '':
+ s += 'Description: ' + self.description + '\n'
+ if self.url is not None:
+ s += 'URL: ' + self.url + '\n'
+ if self.whitespace_hook is not None:
+ s += 'Whitespace-Hook: ' + ('Yes' if self.whitespace_hook else 'No') + '\n'
+
+ def print_deps(deps):
+ return ' '.join([m + (' ' + str(c) if c else '') for m, c in deps])
+
+ if self.depends:
+ s += 'Depends: ' + print_deps(self.depends) + '\n'
+ if self.suggests:
+ s += 'Suggests: ' + print_deps(self.suggests) + '\n'
+ if self.python_requires:
+ s += 'Python-Requires: ' + print_deps(self.python_requires) + '\n'
+ return s
+
+ def __str__(self):
+ return self.name + " (" + str(self.version) + ")"
+
+
+class Data:
+ def __init__(self, version=None):
+ description = Description('dune.module')
+ self.name = description.name
+ self.version = version or description.versionstring
+ self.author_email = description.maintainer[1]
+ self.author = description.author or self.author_email
+ self.description = description.description
+ self.url = description.url
+ self.depends = description.depends
+ self.suggests = description.suggests
+ self.python_requires = description.python_requires
+
+ # if this is a development ('git') version, or an explicit version was passed in,
+ # append .devDATE to the version number and pin the DUNE dependencies accordingly
+ if 'git' in self.version or version is not None:
+ if version is None:
+ major = self.version.split('-')[0]
+ self.version = Version(major).__str__() + '.dev' + date.today().strftime('%Y%m%d')
+ self.depends = [(dep[0], '(<= '+self.version+')') for dep in self.depends]
+ self.python_requires = [((pr[0], '(<= '+self.version+')') if pr[0].startswith('dune-') else pr) for pr in self.python_requires]
+
+ def asPythonRequirementString(self, requirements):
+ return [(r[0]+str(r[1])).replace("("," ").replace(")","").replace(" ","") for r in requirements]
+
+def cmakeArguments(cmakeArgs):
+ if cmakeArgs is None:
+ return []
+ elif isinstance(cmakeArgs, list):
+ return cmakeArgs
+ elif isinstance(cmakeArgs, dict):
+ args = ['-D' + key + '=' + value + '' for key, value in cmakeArgs.items() if value]
+ args += [key + '' for key, value in cmakeArgs.items() if not value]
+ return args
+ else:
+ raise ValueError('definitions must be a list or a dictionary.')
+
+def cmakeFlags():
+ # defaults
+ flags = cmakeArguments(dict([
+ ('CMAKE_BUILD_TYPE','Release'),
+ ('CMAKE_INSTALL_RPATH_USE_LINK_PATH','TRUE'),
+ ('DUNE_ENABLE_PYTHONBINDINGS','TRUE'),
+ ('DUNE_PYTHON_INSTALL_LOCATION','none'),
+ ('ALLOW_CXXFLAGS_OVERWRITE','ON'),
+ ('CMAKE_DISABLE_FIND_PACKAGE_LATEX','TRUE'),
+ ('CMAKE_DISABLE_FIND_PACKAGE_Doxygen','TRUE'),
+ ('INKSCAPE','FALSE')
+ ]))
+ # test environment for additional flags
+ cmakeFlags = os.environ.get('DUNE_CMAKE_FLAGS')
+ # split cmakeFlags and add them to flags
+ if cmakeFlags is not None:
+ flags += shlex.split(cmakeFlags)
+ cmakeFlags = os.environ.get('CMAKE_FLAGS')
+ if cmakeFlags is not None:
+ flags += shlex.split(cmakeFlags)
+ return flags
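+
+# The defaults above can be extended from the shell, e.g. (illustrative values):
+#   export DUNE_CMAKE_FLAGS="-DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER=g++-9"
+# Both DUNE_CMAKE_FLAGS and CMAKE_FLAGS are split with shlex and appended to the defaults.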
+
+def inVEnv():
+ # check whether we are in an anaconda environment
+ # where the checks based on prefix and base_prefix
+ # seem to fail
+ if "CONDA_DEFAULT_ENV" in os.environ:
+ return 1
+
+ # If sys.real_prefix exists, this is a virtualenv set up with the virtualenv package
+ real_prefix = hasattr(sys, 'real_prefix')
+ if real_prefix:
+ return 1
+ # If a virtualenv is set up with pyvenv, we check for equality of base_prefix and prefix
+ if hasattr(sys, 'base_prefix'):
+ return (sys.prefix != sys.base_prefix)
+ # If none of the above conditions triggered, this is probably no virtualenv interpreter
+ return 0
+
+def get_dune_py_dir():
+ try:
+ basedir = os.path.realpath( os.environ['DUNE_PY_DIR'] )
+ basedir = os.path.join(basedir,'dune-py')
+ return basedir
+ except KeyError:
+ pass
+
+ # test if in virtual env
+ if inVEnv():
+ virtualEnvPath = sys.prefix
+ return os.path.join(virtualEnvPath, '.cache', 'dune-py')
+
+ # generate in home directory
+ try:
+ home = os.path.expanduser("~")
+ return os.path.join(home, '.cache', 'dune-py')
+ except KeyError:
+ pass
+
+ raise RuntimeError('Unable to determine location for dune-py module. Please set the environment variable "DUNE_PY_DIR".')
+
+def metaData(version=None, dependencyCheck=True):
+ data = Data(version)
+
+ cmake_flags = cmakeFlags()
+
+ # check if all dependencies are listed in pyproject.toml
+ if dependencyCheck:
+ try:
+ with io.open('pyproject.toml', 'r', encoding='utf-8') as f:
+ for line in f:
+ if 'requires' in line:
+ line = line.split('=',maxsplit=1)[1].strip()
+ modules = ast.literal_eval(line)
+ modules = [x for x in modules
+ if x not in ["setuptools", "wheel", "scikit-build", "cmake", "ninja", "requests"]
+ ]
+ for dep in data.depends:
+ if not any([mod.startswith(dep[0]) for mod in modules]):
+ raise RuntimeError("""
+ pyproject.toml file does not contain all required dune projects defined in the
+ dune.module file: """ + dep[0])
+
+ except IOError:
+ pass
+
+ install_requires = data.asPythonRequirementString(data.python_requires + data.depends)
+
+ try:
+ with open("README.md", "r") as fh:
+ long_description = fh.read()
+ except FileNotFoundError:
+ try:
+ with open("README", "r") as fh:
+ long_description = fh.read()
+ except FileNotFoundError:
+ long_description = 'No long description available for this package'
+ print("Warning: no README[.md] file found so providing a default 'long_description' for this package")
+
+ setupParams = {
+ "name":data.name,
+ "version":data.version,
+ "author":data.author,
+ "author_email":data.author_email,
+ "description":data.description,
+ "long_description":long_description,
+ "long_description_content_type":"text/markdown",
+ "url":data.url if data.url is not None else '',
+ "classifiers":[
+ "Programming Language :: C++",
+ "Programming Language :: Python :: 3",
+ "License :: OSI Approved :: GNU General Public License (GPL)",
+ ],
+ "cmake_args":cmake_flags
+ }
+ if os.path.isdir('python'):
+ setupParams.update({
+ "packages":find_packages(where="python"),
+ "package_dir":{"": "python"},
+ "install_requires":install_requires,
+ "python_requires":'>=3.4',
+ })
+
+ from skbuild.command.build_py import build_py
+ class DunepyConfigure(build_py):
+ def run(self):
+ build_py.run(self)
+ subprocess.call([sys.executable, '-m', 'dune', 'configure'])
+
+ setupParams['cmdclass'] = {
+ 'build_py': DunepyConfigure,
+ }
+
+ return data, setupParams
--- /dev/null
+import matplotlib
+from matplotlib import pyplot
+import numpy as np
+from numpy import amin, amax, linspace, linalg, random
+from matplotlib.collections import PolyCollection
+import os
+
+try:
+ s = os.environ['DUNEPY_BLOCK_PLOTTING']
+ block = s in ['TRUE','true', '1', 't', 'y', 'yes']
+except KeyError:
+ block = True
+try:
+ s = os.environ['DUNEPY_DISABLE_PLOTTING']
+ disable = s in ['TRUE','true', '1', 't', 'y', 'yes']
+except KeyError:
+ disable = False
+block = block and (not disable)
+
+
+def _plotGrid(fig, grid, gridLines="black"):
+ for p in grid.polygons():
+ coll = PolyCollection(p, facecolor='none', edgecolor=gridLines, linewidth=0.5, zorder=2)
+ pyplot.gca().add_collection(coll)
+
+
+def plotGrid(grid, gridLines="black", figure=None,
+ xlim=None, ylim=None, figsize=None):
+ if disable: return
+
+ if figure is None:
+ figure = pyplot.figure(figsize=figsize)
+ show = True
+ else:
+ try:
+ subPlot = figure[1]
+ figure = figure[0]
+ pyplot.subplot(subPlot)
+ except:
+ pass
+ show = False
+
+ _plotGrid(figure, grid, gridLines=gridLines)
+
+ figure.gca().set_aspect('equal')
+ figure.gca().autoscale()
+ if xlim:
+ figure.gca().set_xlim(xlim)
+ if ylim:
+ figure.gca().set_ylim(ylim)
+
+ if show:
+ pyplot.show(block=block)
+
+
+def _plotData(fig, grid, solution, level=0, gridLines="black",
+ component=None, vectors=None, nofVectors=None,
+ xlim=None, ylim=None, clim=None, cmap=None, colorbar=True,
+ on="cell"):
+
+ if (gridLines is not None) and (gridLines != ""):
+ _plotGrid(fig, grid, gridLines=gridLines)
+
+ if solution is not None:
+ if on == "points":
+ assert not any(gt.isNone for gt in grid.indexSet.types(0)), "Can't plot point data on polygonal grids, use `on=\"cells\"` in the plotting command"
+ triangulation = grid.triangulation(level)
+ data = solution.pointData(level)
+ try:
+ x1 = vectors[0]
+ x2 = vectors[1]
+ if x1 >= solution.dimRange or x2 >= solution.dimRange:
+ vectors = None
+ except:
+ vectors = None
+
+ if vectors is not None:
+ if nofVectors is None or nofVectors == 0:
+ nofVectors = len(triangulation.x)
+ idx = random.randint(len(triangulation.x),size=nofVectors)
+ pyplot.quiver(triangulation.x[idx], triangulation.y[idx],
+ data[idx][:,x1], data[idx][:,x2],
+ units='xy', scale=10., zorder=3, color='blue',
+ width=0.007, headwidth=3., headlength=4.)
+ else:
+ if component is None:
+ if solution.dimRange > 1:
+ data = linalg.norm(data,axis=1)
+ else:
+ data = data[:,0]
+ else:
+ data = data[:,component]
+ minData = amin(data)
+ maxData = amax(data)
+ if clim is None:
+ clim = [minData, maxData]
+ if clim[0] > minData and clim[1] < maxData:
+ extend = 'both'
+ elif clim[0] > minData:
+ extend = 'min'
+ elif clim[1] < maxData:
+ extend = 'max'
+ else:
+ extend = 'neither'
+ norm = matplotlib.colors.Normalize(vmin=clim[0], vmax=clim[1])
+ levels = linspace(clim[0], clim[1], 256, endpoint=True)
+ pyplot.tricontourf(triangulation, data, cmap=cmap, levels=levels,
+ extend=extend, norm=norm)
+
+ if colorbar is not None and colorbar:
+ if isinstance(colorbar,bool):
+ colorbar="vertical"
+ # having extend not 'both' does not seem to work (needs fixing)...
+ v = linspace(clim[0], clim[1], 10, endpoint=True)
+ cbar = pyplot.colorbar(orientation=colorbar,shrink=1.0, ticks=v)
+ cbar.ax.tick_params(labelsize=18)
+ else:
+ if vectors is not None: raise ValueError("vector plotting is not supported for polygonal data")
+ polys, values = solution.polygonData()
+ data = []
+ for vv in values:
+ for v in vv:
+ if component is None:
+ if solution.dimRange > 1:
+ d = linalg.norm(v)
+ else:
+ d = v[0]
+ else:
+ d = v[component]
+ data += [d]
+ mind = amin(d)
+ maxd = amax(d)
+ data = np.array(data)
+ minData = amin(data)
+ maxData = amax(data)
+ if clim is None:
+ clim = [minData, maxData]
+ vert = []
+ for p in polys:
+ for q in p:
+ vert.append(q)
+ coll = PolyCollection(vert, array=data, edgecolor=gridLines, linewidth=0.5, zorder=2, cmap=cmap)
+ pyplot.gca().add_collection(coll)
+ if colorbar is not None and colorbar:
+ if isinstance(colorbar,bool):
+ colorbar="vertical"
+ # having extend not 'both' does not seem to work (needs fixing)...
+ if clim[0] > minData and clim[1] < maxData:
+ extend = 'both'
+ elif clim[0] > minData:
+ extend = 'min'
+ elif clim[1] < maxData:
+ extend = 'max'
+ else:
+ extend = 'neither'
+ v = linspace(clim[0], clim[1], 10, endpoint=True)
+ norm = matplotlib.colors.Normalize(vmin=clim[0], vmax=clim[1])
+ cbar = pyplot.colorbar(coll, orientation=colorbar,shrink=1.0,
+ # extend=extend, norm=norm,
+ ticks=v)
+ cbar.ax.tick_params(labelsize=18)
+
+ fig.gca().set_aspect('equal')
+ fig.gca().autoscale()
+ if xlim:
+ fig.gca().set_xlim(xlim)
+ if ylim:
+ fig.gca().set_ylim(ylim)
+
+
+def plotPointData(solution, level=0, gridLines="black",
+ vectors=None, nofVectors=None, figure=None,
+ xlim=None, ylim=None, clim=None, figsize=None, cmap=None,
+ colorbar=True):
+ if disable: return
+ try:
+ grid = solution.grid
+ except:
+ grid = solution
+ solution = None
+ if grid.dimension != 2:
+ raise ValueError("inline plotting so far only available for 2d grids")
+
+ if figure is None:
+ figure = pyplot.figure(figsize=figsize)
+ show = True
+ else:
+ try:
+ subPlot = figure[1]
+ figure = figure[0]
+ pyplot.subplot(subPlot)
+ except:
+ pass
+ show = False
+ _plotData(figure,grid,solution,level,gridLines,None,
+ vectors,nofVectors,xlim,ylim,clim,cmap,
+ colorbar=colorbar,on="points")
+
+ if show:
+ pyplot.show(block=block)
+
+def plotCellData(solution, level=0, gridLines="black",
+ vectors=None, nofVectors=None, figure=None,
+ xlim=None, ylim=None, clim=None, figsize=None, cmap=None,
+ colorbar=True):
+ if disable: return
+ try:
+ grid = solution.grid
+ except:
+ grid = solution
+ solution = None
+ if grid.dimension != 2:
+ raise ValueError("inline plotting so far only available for 2d grids")
+
+ if figure is None:
+ figure = pyplot.figure(figsize=figsize)
+ show = True
+ else:
+ try:
+ subPlot = figure[1]
+ figure = figure[0]
+ pyplot.subplot(subPlot)
+ except:
+ pass
+ show = False
+ _plotData(figure,grid,solution,level,gridLines,None,vectors,nofVectors,xlim,ylim,clim,cmap,
+ colorbar=colorbar,on="cells")
+
+ if show:
+ pyplot.show(block=block)
+
+def plotComponents(solution, level=0, show=None, gridLines="black", figure=None,
+ xlim=None, ylim=None, clim=None, figsize=None, cmap=None):
+ if disable: return
+ try:
+ grid = solution.grid
+ except:
+ grid = solution
+ solution = None
+ if grid.dimension != 2:
+ raise ValueError("inline plotting so far only available for 2d grids")
+
+ if not show:
+ show = range(solution.dimRange)
+
+ if figure is None:
+ figure = pyplot.figure(figsize=figsize)
+ offset = 1 if (gridLines is not None) and (gridLines != "") else 0
+ subfig = 101+(len(show)+offset)*10
+
+ # first the grid if required
+ if (gridLines is not None) and (gridLines != ""):
+ pyplot.subplot(subfig)
+ _plotData(figure,grid,None,level,gridLines,None,False,None,xlim,ylim,clim,cmap,
+ on="points")
+
+ # add the data
+ for p in show:
+ pyplot.subplot(subfig+offset+p)
+ _plotData(figure,grid,solution,level,"",p,False,None,xlim,ylim,clim,cmap,False,
+ on="points")
+
+ pyplot.show(block=block)
+
+def plot(solution,*args,**kwargs):
+ if disable: return
+ try:
+ grid = solution.grid
+ except:
+ grid = solution
+ defaultOn = "cells" if any(gt.isNone for gt in grid.indexSet.types(0)) else "points"
+ use = kwargs.pop("on",defaultOn)
+ if use == "points":
+ plotPointData(solution,*args,**kwargs)
+ elif use == "components-points":
+ plotComponents(solution,*args,**kwargs)
+ elif use == "cells":
+ plotCellData(solution,*args,**kwargs)
+ else:
+ raise ValueError("wrong value for 'on' parameter should be one of 'points','cells','components-points'")
+
+def mayaviPointData(solution, level=0, component=0):
+ if disable: return
+ grid = solution.grid
+ from mayavi import mlab
+ triangulation = grid.triangulation(level)
+ z = solution.pointData(level)[:,component]
+ s = mlab.triangular_mesh(triangulation.x, triangulation.y, z,
+ triangulation.triangles)
+ mlab.show(block=block)
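+
+
+# Illustrative usage (assumes a two-dimensional DUNE grid view `gridView` and a
+# discrete function `solution` providing the interface used above, i.e. `grid`,
+# `pointData`, `polygonData` and `dimRange`):
+#
+#   plotGrid(gridView)           # plot only the grid
+#   plot(solution)               # point data on simplex/cube grids, cell data on polygonal grids
+#   plot(solution, on="cells")   # force plotting as cell data
+#
+# Setting DUNEPY_DISABLE_PLOTTING (e.g. to 'true') turns all plotting into no-ops;
+# DUNEPY_BLOCK_PLOTTING controls whether pyplot.show blocks.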
--- /dev/null
+add_python_targets(typeregistry
+ __init__
+)
+dune_add_pybind11_module(NAME _typeregistry)
+install(TARGETS _typeregistry LIBRARY DESTINATION python/dune/typeregistry)
--- /dev/null
+from ._typeregistry import *
--- /dev/null
+// -*- tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+// vi: set et ts=4 sw=2 sts=2:
+#include <config.h>
+
+
+#include <dune/python/common/typeregistry.hh>
+
+
+PYBIND11_MODULE( _typeregistry, module )
+{
+ Dune::Python::registerTypeRegistry(module);
+}
--- /dev/null
+from dune.create import get as components
--- /dev/null
+[build-system]
+requires = ["setuptools", "wheel"]
--- /dev/null
+from setuptools import setup, find_packages
+
+pkg = [m for m in "${ProjectPythonRequires}".split(' ') if "dune" not in m]
+
+setup(name="${ProjectName}",
+ namespace_packages=['dune'],
+ description="${ProjectDescription}",
+ version="${ProjectVersionString}",
+ author="${ProjectAuthor}",
+ author_email="${ProjectMaintainerEmail}",
+ packages = find_packages(),
+ zip_safe = 0,
+ package_data = {'': ['*.so']},
+ install_requires = pkg
+ )
--- /dev/null
+import os, sys
+here = os.path.dirname(os.path.abspath(__file__))
+mods = os.path.join(here, "python", "dune")
+sys.path.append(mods)
+
+try:
+ from dune.packagemetadata import metaData
+except ImportError:
+ from packagemetadata import metaData
+from skbuild import setup
+setup(**metaData()[1])
--- /dev/null
+add_subdirectory("bash-completion")
--- /dev/null
+add_subdirectory("completions")
--- /dev/null
+install(FILES
+ dunecontrol
+ DESTINATION ${CMAKE_INSTALL_DATADIR}/bash-completion/completions/
+)
--- /dev/null
+# -*- shell-script -*-
+# bash completion for dunecontrol
+
+_dunecontrol_complete ()
+{
+ local COMMANDS="printdeps vcsetup update configure make all exec bexec status svn git"
+ local COMMAND_OPTS="$(for i in printdeps vcsetup update configure make all exec bexec status svn git; do echo --$i-opts; done)"
+ # by default we offer the list of all core modules and the advertised discretization modules
+ local MODULES="dune-common dune-grid dune-grid-howto dune-istl dune-geometry dune-localfunctions dune-pdelab dune-fem dune-fufem"
+ if test "x$DUNE_MODULES" != x; then
+ MODULES=$DUNE_MODULES
+ fi
+
+ # get completion information
+ local cur prev words cword split
+ _init_completion -s || return
+
+ # check whether we have already seen a command
+ local have_command=0
+ for i in `seq $COMP_CWORD`; do
+ case ${COMP_WORDS[i]} in
+ printdeps|vcsetup|update|configure|make|all|exec|bexec|status|svn|git)
+ have_command=1
+ ;;
+ esac
+ done
+
+ # some options influence the next completion step
+ case $prev in
+ :)
+ COMPREPLY=( $(compgen -W "
+ $COMMANDS
+ " -- $cur) )
+ return 0
+ ;;
+ -h|--help)
+ return 0
+ ;;
+ --module)
+ COMPREPLY=( $(compgen -W " $MODULES " -- $cur ) )
+ compopt -o nospace
+ return 0
+ ;;
+ --only)
+ COMPREPLY=( $(compgen -W " $MODULES " -- $cur ) )
+ compopt -o nospace
+ return 0
+ ;;
+ --opts)
+ compopt -o filenames
+ COMPREPLY=( $( compgen -f -- "$cur" ) \
+ $( compgen -d -- "$cur" ) )
+ return 0
+ ;;
+# git)
+# exec __git_func_wrap __git_main
+# compopt -D
+# COMPREPLY=( $( compgen -W ":" -- $cur ) )
+# return 0
+# ;;
+ esac
+
+ # if we already have a command, we either pass an option to the command,
+ # or we add a colon for the next command
+ if test x$have_command = x1; then
+ COMPREPLY=( $(compgen -W " -- : " -- $cur ) )
+ return 0;
+ fi
+
+ # the usual dunecontrol options
+ COMPREPLY=( $(compgen -W "
+ -h --help --use-cmake --current --current-dep --resume --skipfirst
+ --module= --only=
+ --opts=
+ --builddir=
+ $COMMANDS
+ $COMMAND_OPTS
+ " -- $cur)
+ )
+
+ # don't append space to options --foo=...
+ [[ $COMPREPLY == *= ]] && compopt -o nospace
+} && complete -F _dunecontrol_complete dunecontrol
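+
+# Installation note: the accompanying CMakeLists.txt installs this file into
+# ${CMAKE_INSTALL_DATADIR}/bash-completion/completions/. To try the completion in a
+# running shell without installing, source the file manually; the trailing
+# `complete -F _dunecontrol_complete dunecontrol` registers it for dunecontrol.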